diff --git a/.gitignore b/.gitignore
index da47590a2fac52879c470e3c6b89dffaa57d775a..50f4251320abc80358b67eab22c02672d5f26bd6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -68,6 +68,8 @@ CMakeError.log
 *.o
 version.c
 taos.rc
+src/connector/jdbc/.classpath
+src/connector/jdbc/.project
 src/connector/jdbc/.settings/
 tests/comparisonTest/cassandra/cassandratest/.classpath
 tests/comparisonTest/cassandra/cassandratest/.project
diff --git a/.gitmodules b/.gitmodules
index 346f5c00699e51eac39dbfaffdbf96656052b024..a2266c46afd180b52d3aa19003380078894f6a4b 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -13,3 +13,6 @@
 [submodule "deps/jemalloc"]
 path = deps/jemalloc
 url = https://github.com/jemalloc/jemalloc
+[submodule "deps/TSZ"]
+ path = deps/TSZ
+ url = https://github.com/taosdata/TSZ.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6f50aca0797cb2d3a2b61796d43137fb417f76ee..093731f190a380539cca3db8f8c12793d4b6557c 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,3 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
 IF (CMAKE_VERSION VERSION_LESS 3.0)
 PROJECT(TDengine CXX)
 SET(PROJECT_VERSION_MAJOR "${LIB_MAJOR_VERSION}")
@@ -10,6 +9,12 @@ ELSE ()
 PROJECT(TDengine VERSION "${LIB_VERSION_STRING}" LANGUAGES CXX)
 ENDIF ()
+IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+ CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+ELSE ()
+ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+ENDIF ()
+
 SET(TD_ACCOUNT FALSE)
 SET(TD_ADMIN FALSE)
 SET(TD_GRANT FALSE)
@@ -42,13 +47,6 @@ INCLUDE(cmake/env.inc)
 INCLUDE(cmake/version.inc)
 INCLUDE(cmake/install.inc)
-IF (CMAKE_SYSTEM_NAME MATCHES "Linux")
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pipe -Wall -Wshadow -Werror")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pipe -Wall -Wshadow -Werror")
-ENDIF ()
-MESSAGE(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
-MESSAGE(STATUS "CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}")
-
 ADD_SUBDIRECTORY(deps)
 ADD_SUBDIRECTORY(src)
 ADD_SUBDIRECTORY(tests)
diff --git a/Jenkinsfile b/Jenkinsfile
index b48dca02419a3e4f3f78ea1a16d93fd376e9d703..28f1cb0bc0e652bcb546995c569f572096afbf28 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -119,16 +119,32 @@ pipeline {
 abortPreviousBuilds()
 }
 sh'''
+ rm -rf ${WORKSPACE}.tes
 cp -r ${WORKSPACE} ${WORKSPACE}.tes
 cd ${WORKSPACE}.tes
- git checkout develop
- git pull
+
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ git checkout master
+ git pull origin master
+ '''
+ }
+ else {
+ sh '''
+ git checkout develop
+ git pull origin develop
+ '''
+ }
+ }
+ sh'''
 git fetch origin +refs/pull/${CHANGE_ID}/merge
 git checkout -qf FETCH_HEAD
 '''
 script{
- env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD develop|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
+ env.skipstage=sh(script:"cd ${WORKSPACE}.tes && git --no-pager diff --name-only FETCH_HEAD ${env.CHANGE_TARGET}|grep -v -E '.*md|//src//connector|Jenkinsfile|test-all.sh' || echo 0 ",returnStdout:true)
 }
 println env.skipstage
 sh'''
diff --git a/README-CN.md b/README-CN.md
index afb242d6219069887be615b606f3a0eb5206422c..a9bc814e8d6f6bef0ad94e29588f62e2e4c0e7f1 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -23,7 +23,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
 TDengine是一个高效的存储、查询、分析时序大数据的平台，专为物联网、车联网、工业互联网、运维监测等优化而设计。您可以像使用关系型数据库MySQL一样来使用它，但建议您在使用前仔细阅读一遍下面的文档，特别是 [数据模型](https://www.taosdata.com/cn/documentation/architecture) 与 [数据建模](https://www.taosdata.com/cn/documentation/model)。除本文档之外，欢迎 [下载产品白皮书](https://www.taosdata.com/downloads/TDengine%20White%20Paper.pdf)。
-# 生成
+# 构建
 TDengine目前2.0版服务器仅能在Linux系统上安装和运行，后续会支持Windows、macOS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64，后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。本快速指南仅适用于通过源码安装。
@@ -107,7 +107,7 @@ Go 连接器和 Grafana 插件在其他独立仓库，如果安装它们的话
 git submodule update --init --recursive
 ```
-## 生成 TDengine
+## 构建 TDengine
 ### Linux 系统
@@ -116,6 +116,12 @@ mkdir debug && cd debug
 cmake .. && cmake --build .
 ```
+您可以选择使用 Jemalloc 作为内存分配器，替代默认的 glibc:
+```bash
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
 在X86-64、X86、arm64、arm32 和 mips64 平台上，TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型，如 aarch64 或 aarch32 等。
 aarch64:
@@ -179,9 +185,10 @@ cmake .. && cmake --build .
 # 安装
-如果你不想安装，可以直接在shell中运行。生成完成后，安装 TDengine:
+生成完成后，安装 TDengine（下文给出的指令以 Linux 为例，如果是在 Windows 下，那么对应的指令会是 `nmake install`）:
+
 ```bash
-make install
+sudo make install
 ```
 用户可以在[文件目录结构](https://www.taosdata.com/cn/documentation/administrator#directories)中了解更多在操作系统中生成的目录或文件。
@@ -189,7 +196,7 @@ make install
 安装成功后，在终端中启动 TDengine 服务：
 ```bash
-taosd
+sudo systemctl start taosd
 ```
 用户可以使用 TDengine Shell 来连接 TDengine 服务，在终端中，输入：
@@ -202,7 +209,7 @@ taos
 ## 快速运行
-TDengine 生成后，在终端执行以下命令：
+如果不希望以服务方式运行 TDengine，也可以在终端中直接运行它。也即在生成完成后，执行以下命令（在 Windows 下，生成的可执行文件会带有 .exe 后缀，例如会名为 taosd.exe ）:
 ```bash
 ./build/bin/taosd -c test/cfg
diff --git a/README.md b/README.md
index 0e1adcd97c61265ec0ad272043c9604736545b3d..2dea05f09d268b0d78de15ab98f3584df055c353 100644
--- a/README.md
+++ b/README.md
@@ -110,6 +110,12 @@ mkdir debug && cd debug
 cmake .. && cmake --build .
 ```
+You can use Jemalloc as memory allocator instead of glibc:
+```
+apt install autoconf
+cmake .. -DJEMALLOC_ENABLED=true
+```
+
 TDengine build script can detect the host machine's architecture on X86-64, X86, arm64, arm32 and mips64 platform.
 You can also specify CPUTYPE option like aarch64 or aarch32 too if the detection result is not correct:
@@ -169,7 +175,7 @@ cmake .. && cmake --build .
 # Installing
-After building successfully, TDengine can be installed by:
+After building successfully, TDengine can be installed by: (On Windows platform, the following command should be `nmake install`)
 ```bash
 sudo make install
 ```
@@ -191,7 +197,7 @@ If TDengine shell connects the server successfully, welcome messages and version
 ## Quick Run
-If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal:
+If you don't want to run TDengine as a service, you can run it in current shell. For example, to quickly start a TDengine server after building, run the command below in terminal: (We take Linux as an example, command on Windows will be `taosd.exe`)
 ```bash
 ./build/bin/taosd -c test/cfg
 ```
diff --git a/alert/release.sh b/alert/release.sh
index 35eb4d677f2a6f07699c2668ea67d44c5af44c92..20317e41663de528c0fcaa42621db57b7b5a82dc 100755
--- a/alert/release.sh
+++ b/alert/release.sh
@@ -7,8 +7,9 @@ set -e
 cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
 osType=linux # [linux | darwin | windows]
 version=""
+verType=stable # [stable, beta]
 declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
-while getopts "h:c:o:n:" arg
+while getopts "h:c:o:n:V:" arg
 do
 case $arg in
 c)
@@ -23,6 +24,10 @@ do
 #echo "version=$OPTARG"
 version=$(echo $OPTARG)
 ;;
+ V)
+ #echo "verType=$OPTARG"
+ verType=$(echo $OPTARG)
+ ;;
 h)
 echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
 exit 0
@@ -55,6 +60,22 @@ cp alert alert.cfg install_driver.sh ./TDengine-alert/.
 cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
 chmod 777 ./TDengine-alert/install_driver.sh
-tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
+tar -I 'gzip -9' -cf ${scriptdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
 rm -rf ./TDengine-alert
+
+
+# mv package to community/release/
+pkg_name=TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}
+
+if [ "$verType" == "beta" ]; then
+ pkg_name=TDengine-alert-${version}-${verType}-${osType^}-${archMap[${cpuType}]}
+elif [ "$verType" == "stable" ]; then
+ pkg_name=${pkg_name}
+else
+ echo "unknown verType, neither stable nor beta"
+ exit 1
+fi
+
+cd ${scriptdir}/../release/
+mv ${scriptdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz ${pkg_name}.tar.gz
diff --git a/cmake/define.inc b/cmake/define.inc
index a15e0aecbb2d30ad2ec7aa1c5761c9d2a40f3323..6c466fee026097b0bdeb89c7a4fc54fc382c2726 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
 PROJECT(TDengine)
 IF (TD_ACCOUNT)
@@ -41,6 +41,10 @@ IF (TD_POWER)
 ADD_DEFINITIONS(-D_TD_POWER_)
 ENDIF ()
+IF (TD_TQ)
+ ADD_DEFINITIONS(-D_TD_TQ_)
+ENDIF ()
+
 IF (TD_MEM_CHECK)
 ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
 ENDIF ()
@@ -57,7 +61,7 @@ IF (TD_LINUX_64)
 ADD_DEFINITIONS(-D_M_X64)
 ADD_DEFINITIONS(-D_TD_LINUX_64)
 MESSAGE(STATUS "linux64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
 ADD_DEFINITIONS(-DUSE_LIBICONV)
 IF (JEMALLOC_ENABLED)
@@ -70,7 +74,7 @@ IF (TD_LINUX_32)
 ADD_DEFINITIONS(-D_TD_LINUX_32)
 ADD_DEFINITIONS(-DUSE_LIBICONV)
 MESSAGE(STATUS "linux32 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
 ENDIF ()
 IF (TD_ARM_64)
@@ -78,7 +82,9 @@ IF (TD_ARM_64)
 ADD_DEFINITIONS(-D_TD_ARM_)
 ADD_DEFINITIONS(-DUSE_LIBICONV)
 MESSAGE(STATUS "arm64 is defined")
- SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+ SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 
-D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src) ENDIF () IF (TD_ARM_32) @@ -86,7 +92,9 @@ IF (TD_ARM_32) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "arm32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") + SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") + + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src) ENDIF () IF (TD_MIPS_64) @@ -94,7 +102,7 @@ IF (TD_MIPS_64) ADD_DEFINITIONS(-D_TD_MIPS_64) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_MIPS_32) @@ -102,7 +110,7 @@ IF (TD_MIPS_32) ADD_DEFINITIONS(-D_TD_MIPS_32) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_APLHINE) @@ -139,6 +147,7 @@ IF (TD_LINUX) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src) ENDIF () IF (TD_DARWIN_64) @@ -147,7 +156,11 @@ IF (TD_DARWIN_64) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "darwin64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + IF ("${CPUTYPE}" STREQUAL "apple_m1") + SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + ELSE () + SET(COMMON_FLAGS "-Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + ENDIF () IF (TD_MEMORY_SANITIZER) SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG") ELSE () @@ -156,6 +169,7 @@ IF (TD_DARWIN_64) SET(RELEASE_FLAGS "-Og") INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src) ENDIF () IF (TD_WINDOWS) @@ -178,7 +192,7 @@ IF (TD_WINDOWS) MESSAGE("memory sanitizer detected as false") SET(DEBUG_FLAGS "/Zi /W3 /GL") ENDIF () - SET(RELEASE_FLAGS "/W0 /O3 /GL") + SET(RELEASE_FLAGS "/W0 /O2 /GL") # MSVC only support O2 ENDIF () INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/pthread) @@ -186,6 +200,7 @@ IF (TD_WINDOWS) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/wepoll/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MsvcLibX/include) + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lua/src) ENDIF () IF (TD_WINDOWS_64) @@ -201,6 +216,10 @@ IF (TD_WINDOWS_32) MESSAGE(STATUS "windows32 is defined") ENDIF () +IF (TD_LINUX) + SET(COMMON_FLAGS "${COMMON_FLAGS} -pipe -Wshadow") +ENDIF () + INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) 
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/os/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc) diff --git a/cmake/env.inc b/cmake/env.inc index 6c1ce8fd89847b41f0cdb595384c2772823c7885..2ceaecc2d9e486c249931ae45089e6a820e475b9 100755 --- a/cmake/env.inc +++ b/cmake/env.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) SET(CMAKE_C_STANDARD 11) @@ -35,13 +35,13 @@ ENDIF () # Set compiler options SET(COMMON_C_FLAGS "${COMMON_FLAGS} -std=gnu99") -SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_FLAGS} ${DEBUG_FLAGS}") -SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FLAGS}") +SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ${COMMON_C_FLAGS} ${DEBUG_FLAGS}") +SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_C_FLAGS} ${RELEASE_FLAGS}") # Set c++ compiler options -# SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11") -# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}") -# SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}") +SET(COMMON_CXX_FLAGS "${COMMON_FLAGS} -std=c++11 -Wno-unused-function") +SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} ${COMMON_CXX_FLAGS} ${DEBUG_FLAGS}") +SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}") IF (${CMAKE_BUILD_TYPE} MATCHES "Debug") SET(CMAKE_BUILD_TYPE "Debug") diff --git a/cmake/input.inc b/cmake/input.inc index c073bbbc037d105dbacea1e14981537cd31b7202..9d716e1e7345955f7b6b844c85ace7e7bd5c6080 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (${ACCOUNT} MATCHES "true") @@ -46,6 +46,9 @@ ENDIF () IF (${DBNAME} MATCHES "power") SET(TD_POWER TRUE) MESSAGE(STATUS "power is true") +ELSEIF (${DBNAME} MATCHES "tq") + SET(TD_TQ TRUE) + MESSAGE(STATUS "tq is true") ENDIF () IF (${DLLTYPE} MATCHES "go") @@ -88,3 +91,12 @@ SET(TD_MEMORY_SANITIZER FALSE) IF (${MEMORY_SANITIZER} MATCHES "true") SET(TD_MEMORY_SANITIZER TRUE) ENDIF () + +IF (${TSZ_ENABLED} MATCHES "true") + # define add + MESSAGE(STATUS "build with TSZ enabled") + ADD_DEFINITIONS(-DTD_TSZ) + set(VAR_TSZ "TSZ" CACHE INTERNAL "global variant tsz" ) +ELSE() + set(VAR_TSZ "" CACHE INTERNAL "global variant empty" ) +ENDIF() diff --git a/cmake/install.inc b/cmake/install.inc index b37cf751fbf23966671a571385b65d93bd22865d..e9ad240a793b9736edbe5769c6af12276e13a1a6 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.31.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.34-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/platform.inc b/cmake/platform.inc index 5f7391c996120bb44a96a09b659147be90985d30..a78082a1fc62a8ad66c54dcf005e3e15edf5f5f0 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) # @@ -108,6 +108,10 @@ IF ("${CPUTYPE}" STREQUAL "") SET(TD_LINUX TRUE) SET(TD_LINUX_64 FALSE) SET(TD_MIPS_64 TRUE) + ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "arm64") + SET(CPUTYPE "apple_m1") + MESSAGE(STATUS "Set CPUTYPE to apple silicon m1") + SET(TD_ARM_64 TRUE) ENDIF () ELSE () @@ -153,5 +157,5 @@ ELSEIF (${OSTYPE} MATCHES "Alpine") MESSAGE(STATUS "input osType: Alpine") SET(TD_APLHINE TRUE) ELSE () - MESSAGE(STATUS "input osType unknown: " ${OSTYPE}) + MESSAGE(STATUS "The user specified osType is unknown: " ${OSTYPE}) ENDIF () diff --git a/cmake/version.inc b/cmake/version.inc index ed8e7a156cc8fffd231889932940d4ff68614102..f576627908721da5a2e3e69a02e7ba405d47e3a7 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -1,16 +1,16 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.1.1.0") + SET(TD_VER_NUMBER "2.1.5.0") ENDIF () IF (DEFINED VERCOMPATIBLE) SET(TD_VER_COMPATIBLE ${VERCOMPATIBLE}) ELSE () - SET(TD_VER_COMPATIBLE "2.0.0.0") + SET(TD_VER_COMPATIBLE "1.0.0.0") ENDIF () find_program(HAVE_GIT NAMES git) diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 99152c6ce365768b3b782809cca5aacbec1ef7fd..516c752bd101f26f04c3986ed50edd55121c5a40 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -1,6 +1,11 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) +IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +ELSE () + CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +ENDIF () + ADD_SUBDIRECTORY(zlib-1.2.11) ADD_SUBDIRECTORY(pthread) ADD_SUBDIRECTORY(regex) @@ -10,6 +15,7 @@ ADD_SUBDIRECTORY(cJson) ADD_SUBDIRECTORY(wepoll) ADD_SUBDIRECTORY(MsvcLibX) ADD_SUBDIRECTORY(rmonotonic) +ADD_SUBDIRECTORY(lua) IF (TD_LINUX AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) @@ -20,7 +26,7 @@ IF (TD_DARWIN AND TD_MQTT) ENDIF () IF (TD_LINUX_64 AND JEMALLOC_ENABLED) - MESSAGE("setup dpes/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) + MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) MESSAGE("binary dir:" ${CMAKE_BINARY_DIR}) include(ExternalProject) ExternalProject_Add(jemalloc @@ -31,3 +37,7 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED) BUILD_COMMAND ${MAKE} ) ENDIF () + +IF (${TSZ_ENABLED} MATCHES "true") + ADD_SUBDIRECTORY(TSZ) +ENDIF() \ No newline at end of file diff --git a/deps/MQTT-C/CMakeLists.txt b/deps/MQTT-C/CMakeLists.txt index 15b35525210ec90e6e2efbdcd0e6128cb4d34f91..37959140e70d4808c845e3ca6e415ce8bdecf3ac 100644 --- a/deps/MQTT-C/CMakeLists.txt +++ b/deps/MQTT-C/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) # MQTT-C build options option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" 
OFF) diff --git a/deps/MsvcLibX/CMakeLists.txt b/deps/MsvcLibX/CMakeLists.txt index 4428579e1c098425c9d72d7d58a5fda15cd34c93..4197f502b131b8dc7ae289fd822e15f8a6522cbf 100644 --- a/deps/MsvcLibX/CMakeLists.txt +++ b/deps/MsvcLibX/CMakeLists.txt @@ -1,6 +1,11 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) +IF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +ELSE () + CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +ENDIF () + IF (TD_WINDOWS) INCLUDE_DIRECTORIES(include) AUX_SOURCE_DIRECTORY(src SRC) diff --git a/deps/TSZ b/deps/TSZ new file mode 160000 index 0000000000000000000000000000000000000000..0ca5b15a8eac40327dd737be52c926fa5675712c --- /dev/null +++ b/deps/TSZ @@ -0,0 +1 @@ +Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c diff --git a/deps/iconv/CMakeLists.txt b/deps/iconv/CMakeLists.txt index 286070fa9071f8fcd1949850cec87c1ced3245d7..ab5fa1a5d1f409496118dc6212fb6f1512b51bb2 100644 --- a/deps/iconv/CMakeLists.txt +++ b/deps/iconv/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/lua/CMakeLists.txt b/deps/lua/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3e34d447750046843ad328f0dc3edecde07c385c --- /dev/null +++ b/deps/lua/CMakeLists.txt @@ -0,0 +1,7 @@ +AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SOURCE_LIST) + +ADD_LIBRARY(lua ${SOURCE_LIST}) +TARGET_INCLUDE_DIRECTORIES(lua PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/inc) +SET_SOURCE_FILES_PROPERTIES(./src/lgc.c PROPERTIES COMPILE_FLAGS -w) +SET_SOURCE_FILES_PROPERTIES(./src/ltable.c PROPERTIES COMPILE_FLAGS -w) +SET_SOURCE_FILES_PROPERTIES(./src/loslib.c PROPERTIES COMPILE_FLAGS -w) diff --git a/deps/lua/COPYRIGHT b/deps/lua/COPYRIGHT new file mode 100644 index 0000000000000000000000000000000000000000..84d401b1e4d1e8f952943df8a800f9383aa23fe1 --- /dev/null +++ b/deps/lua/COPYRIGHT @@ -0,0 +1,34 @@ +Lua License +----------- + +Lua is licensed under the terms of the MIT license reproduced below. +This means that Lua is free software and can be used for both academic +and commercial purposes at absolutely no cost. + +For details and rationale, see http://www.lua.org/license.html . + +=============================================================================== + +Copyright (C) 1994-2006 Lua.org, PUC-Rio. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +=============================================================================== + +(end of COPYRIGHT) diff --git a/deps/lua/HISTORY b/deps/lua/HISTORY new file mode 100644 index 0000000000000000000000000000000000000000..d807a533835a9f7c3976a97839dc9dca72e679d3 --- /dev/null +++ b/deps/lua/HISTORY @@ -0,0 +1,183 @@ +HISTORY for Lua 5.1 + +* Changes from version 5.0 to 5.1 + ------------------------------- + Language: + + new module system. + + new semantics for control variables of fors. + + new semantics for setn/getn. + + new syntax/semantics for varargs. + + new long strings and comments. + + new `mod' operator (`%') + + new length operator #t + + metatables for all types + API: + + new functions: lua_createtable, lua_get(set)field, lua_push(to)integer. + + user supplies memory allocator (lua_open becomes lua_newstate). + + luaopen_* functionst must be called through Lua. + Implementation: + + new configuration scheme via luaconf.h. + + incremental garbage collection. + + better handling of end-of-line in the lexer. + + fully reentrant parser (new Lua function `load') + + better support for 64-bit machines. + + native loadlib support for Mac OS X. + + standard distribution in only one library (lualib.a merged into lua.a) + +* Changes from version 4.0 to 5.0 + ------------------------------- + Language: + + lexical scoping. + + Lua coroutines. + + standard libraries now packaged in tables. + + tags replaced by metatables and tag methods replaced by metamethods, + stored in metatables. + + proper tail calls. + + each function can have its own global table, which can be shared. + + new __newindex metamethod, called when we insert a new key into a table. + + new block comments: --[[ ... ]]. + + new generic for. + + new weak tables. + + new boolean type. + + new syntax "local function". + + (f()) returns the first value returned by f. + + {f()} fills a table with all values returned by f. + + \n ignored in [[\n . + + fixed and-or priorities. + + more general syntax for function definition (e.g. function a.x.y:f()...end). + + more general syntax for function calls (e.g. (print or write)(9)). + + new functions (time/date, tmpfile, unpack, require, load*, etc.). + API: + + chunks are loaded by using lua_load; new luaL_loadfile and luaL_loadbuffer. + + introduced lightweight userdata, a simple "void*" without a metatable. + + new error handling protocol: the core no longer prints error messages; + all errors are reported to the caller on the stack. + + new lua_atpanic for host cleanup. + + new, signal-safe, hook scheme. + Implementation: + + new license: MIT. + + new, faster, register-based virtual machine. + + support for external multithreading and coroutines. + + new and consistent error message format. + + the core no longer needs "stdio.h" for anything (except for a single + use of sprintf to convert numbers to strings). + + lua.c now runs the environment variable LUA_INIT, if present. It can + be "@filename", to run a file, or the chunk itself. + + support for user extensions in lua.c. + sample implementation given for command line editing. + + new dynamic loading library, active by default on several platforms. + + safe garbage-collector metamethods. + + precompiled bytecodes checked for integrity (secure binary dostring). + + strings are fully aligned. + + position capture in string.find. + + read('*l') can read lines with embedded zeros. 
+ +* Changes from version 3.2 to 4.0 + ------------------------------- + Language: + + new "break" and "for" statements (both numerical and for tables). + + uniform treatment of globals: globals are now stored in a Lua table. + + improved error messages. + + no more '$debug': full speed *and* full debug information. + + new read form: read(N) for next N bytes. + + general read patterns now deprecated. + (still available with -DCOMPAT_READPATTERNS.) + + all return values are passed as arguments for the last function + (old semantics still available with -DLUA_COMPAT_ARGRET) + + garbage collection tag methods for tables now deprecated. + + there is now only one tag method for order. + API: + + New API: fully re-entrant, simpler, and more efficient. + + New debug API. + Implementation: + + faster than ever: cleaner virtual machine and new hashing algorithm. + + non-recursive garbage-collector algorithm. + + reduced memory usage for programs with many strings. + + improved treatment for memory allocation errors. + + improved support for 16-bit machines (we hope). + + code now compiles unmodified as both ANSI C and C++. + + numbers in bases other than 10 are converted using strtoul. + + new -f option in Lua to support #! scripts. + + luac can now combine text and binaries. + +* Changes from version 3.1 to 3.2 + ------------------------------- + + redirected all output in Lua's core to _ERRORMESSAGE and _ALERT. + + increased limit on the number of constants and globals per function + (from 2^16 to 2^24). + + debugging info (lua_debug and hooks) moved into lua_state and new API + functions provided to get and set this info. + + new debug lib gives full debugging access within Lua. + + new table functions "foreachi", "sort", "tinsert", "tremove", "getn". + + new io functions "flush", "seek". + +* Changes from version 3.0 to 3.1 + ------------------------------- + + NEW FEATURE: anonymous functions with closures (via "upvalues"). + + new syntax: + - local variables in chunks. + - better scope control with DO block END. + - constructors can now be also written: { record-part; list-part }. + - more general syntax for function calls and lvalues, e.g.: + f(x).y=1 + o:f(x,y):g(z) + f"string" is sugar for f("string") + + strings may now contain arbitrary binary data (e.g., embedded zeros). + + major code re-organization and clean-up; reduced module interdependecies. + + no arbitrary limits on the total number of constants and globals. + + support for multiple global contexts. + + better syntax error messages. + + new traversal functions "foreach" and "foreachvar". + + the default for numbers is now double. + changing it to use floats or longs is easy. + + complete debug information stored in pre-compiled chunks. + + sample interpreter now prompts user when run interactively, and also + handles control-C interruptions gracefully. + +* Changes from version 2.5 to 3.0 + ------------------------------- + + NEW CONCEPT: "tag methods". + Tag methods replace fallbacks as the meta-mechanism for extending the + semantics of Lua. Whereas fallbacks had a global nature, tag methods + work on objects having the same tag (e.g., groups of tables). + Existing code that uses fallbacks should work without change. + + new, general syntax for constructors {[exp] = exp, ... }. + + support for handling variable number of arguments in functions (varargs). + + support for conditional compilation ($if ... $else ... $end). + + cleaner semantics in API simplifies host code. + + better support for writing libraries (auxlib.h). 
+ + better type checking and error messages in the standard library. + + luac can now also undump. + +* Changes from version 2.4 to 2.5 + ------------------------------- + + io and string libraries are now based on pattern matching; + the old libraries are still available for compatibility + + dofile and dostring can now return values (via return statement) + + better support for 16- and 64-bit machines + + expanded documentation, with more examples + +* Changes from version 2.2 to 2.4 + ------------------------------- + + external compiler creates portable binary files that can be loaded faster + + interface for debugging and profiling + + new "getglobal" fallback + + new functions for handling references to Lua objects + + new functions in standard lib + + only one copy of each string is stored + + expanded documentation, with more examples + +* Changes from version 2.1 to 2.2 + ------------------------------- + + functions now may be declared with any "lvalue" as a name + + garbage collection of functions + + support for pipes + +* Changes from version 1.1 to 2.1 + ------------------------------- + + object-oriented support + + fallbacks + + simplified syntax for tables + + many internal improvements + +(end of HISTORY) diff --git a/deps/lua/INSTALL b/deps/lua/INSTALL new file mode 100644 index 0000000000000000000000000000000000000000..65f6f1eb07bf3e62fc0fbffd70a970827eb138bc --- /dev/null +++ b/deps/lua/INSTALL @@ -0,0 +1,96 @@ +INSTALL for Lua 5.1 + +* Building Lua + ------------ + Lua is built in the src directory, but the build process can be + controlled from the top-level Makefile. + + Building Lua on Unix systems should be very easy. First do "make" and + see if your platform is listed. If so, just do "make xxx", where xxx + is your platform name. The platforms currently supported are: + ansi bsd generic linux macosx mingw posix solaris + + See below for customization instructions and for instructions on how + to build with other Windows compilers. + + If you want to check that Lua has been built correctly, do "make test" + after building Lua. Also, have a look at the example programs in test. + +* Installing Lua + -------------- + Once you have built Lua, you may want to install it in an official + place in your system. In this case, do "make install". The official + place and the way to install files are defined in Makefile. You must + have the right permissions to install files. + + If you want to build and install Lua in one step, do "make xxx install", + where xxx is your platform name. + + If you want to install Lua locally, then do "make local". This will + create directories bin, include, lib, man, and install Lua there as + follows: + + bin: lua luac + include: lua.h luaconf.h lualib.h lauxlib.h lua.hpp + lib: liblua.a + man/man1: lua.1 luac.1 + + These are the only directories you need for development. + + There are man pages for lua and luac, in both nroff and html, and a + reference manual in html in doc, some sample code in test, and some + useful stuff in etc. You don't need these directories for development. + + If you want to install Lua locally, but in some other directory, do + "make install INSTALL_TOP=xxx", where xxx is your chosen directory. + + See below for instructions for Windows and other systems. + +* Customization + ------------- + Three things can be customized by editing a file: + - Where and how to install Lua -- edit Makefile. + - How to build Lua -- edit src/Makefile. + - Lua features -- edit src/luaconf.h. 
+ + You don't actually need to edit the Makefiles because you may set the + relevant variables when invoking make. + + On the other hand, if you need to select some Lua features, you'll need + to edit src/luaconf.h. The edited file will be the one installed, and + it will be used by any Lua clients that you build, to ensure consistency. + + We strongly recommend that you enable dynamic loading. This is done + automatically for all platforms listed above that have this feature + (and also Windows). See src/luaconf.h and also src/Makefile. + +* Building Lua on Windows and other systems + ----------------------------------------- + If you're not using the usual Unix tools, then the instructions for + building Lua depend on the compiler you use. You'll need to create + projects (or whatever your compiler uses) for building the library, + the interpreter, and the compiler, as follows: + + library: lapi.c lcode.c ldebug.c ldo.c ldump.c lfunc.c lgc.c llex.c + lmem.c lobject.c lopcodes.c lparser.c lstate.c lstring.c + ltable.c ltm.c lundump.c lvm.c lzio.c + lauxlib.c lbaselib.c ldblib.c liolib.c lmathlib.c loslib.c + ltablib.c lstrlib.c loadlib.c linit.c + + interpreter: library, lua.c + + compiler: library, luac.c print.c + + If you use Visual Studio .NET, you can use etc/luavs.bat + in its "Command Prompt". + + If all you want is to build the Lua interpreter, you may put all .c files + in a single project, except for luac.c and print.c. Or just use etc/all.c. + + To use Lua as a library in your own programs, you'll need to know how to + create and use libraries with your compiler. + + As mentioned above, you may edit luaconf.h to select some features before + building Lua. + +(end of INSTALL) diff --git a/deps/lua/MANIFEST b/deps/lua/MANIFEST new file mode 100644 index 0000000000000000000000000000000000000000..fd18c8bb9e3acbcabf2c4a5e5c8c2158c94c1635 --- /dev/null +++ b/deps/lua/MANIFEST @@ -0,0 +1,108 @@ +MANIFEST contents of Lua 5.1 distribution on Mon Feb 20 11:37:30 BRT 2006 +lua-5.1 +lua-5.1/COPYRIGHT +lua-5.1/HISTORY +lua-5.1/INSTALL +lua-5.1/MANIFEST +lua-5.1/Makefile +lua-5.1/README +lua-5.1/doc +lua-5.1/doc/contents.html +lua-5.1/doc/logo.gif +lua-5.1/doc/lua.1 +lua-5.1/doc/lua.css +lua-5.1/doc/lua.html +lua-5.1/doc/luac.1 +lua-5.1/doc/luac.html +lua-5.1/doc/manual.html +lua-5.1/doc/readme.html +lua-5.1/etc +lua-5.1/etc/Makefile +lua-5.1/etc/README +lua-5.1/etc/all.c +lua-5.1/etc/lua.hpp +lua-5.1/etc/lua.ico +lua-5.1/etc/lua.pc +lua-5.1/etc/luavs.bat +lua-5.1/etc/min.c +lua-5.1/etc/noparser.c +lua-5.1/etc/strict.lua +lua-5.1/src +lua-5.1/src/Makefile +lua-5.1/src/lapi.c +lua-5.1/src/lapi.h +lua-5.1/src/lauxlib.c +lua-5.1/src/lauxlib.h +lua-5.1/src/lbaselib.c +lua-5.1/src/lcode.c +lua-5.1/src/lcode.h +lua-5.1/src/ldblib.c +lua-5.1/src/ldebug.c +lua-5.1/src/ldebug.h +lua-5.1/src/ldo.c +lua-5.1/src/ldo.h +lua-5.1/src/ldump.c +lua-5.1/src/lfunc.c +lua-5.1/src/lfunc.h +lua-5.1/src/lgc.c +lua-5.1/src/lgc.h +lua-5.1/src/linit.c +lua-5.1/src/liolib.c +lua-5.1/src/llex.c +lua-5.1/src/llex.h +lua-5.1/src/llimits.h +lua-5.1/src/lmathlib.c +lua-5.1/src/lmem.c +lua-5.1/src/lmem.h +lua-5.1/src/loadlib.c +lua-5.1/src/lobject.c +lua-5.1/src/lobject.h +lua-5.1/src/lopcodes.c +lua-5.1/src/lopcodes.h +lua-5.1/src/loslib.c +lua-5.1/src/lparser.c +lua-5.1/src/lparser.h +lua-5.1/src/lstate.c +lua-5.1/src/lstate.h +lua-5.1/src/lstring.c +lua-5.1/src/lstring.h +lua-5.1/src/lstrlib.c +lua-5.1/src/ltable.c +lua-5.1/src/ltable.h +lua-5.1/src/ltablib.c +lua-5.1/src/ltm.c +lua-5.1/src/ltm.h +lua-5.1/src/lua.c 
+lua-5.1/src/lua.h +lua-5.1/src/luac.c +lua-5.1/src/luaconf.h +lua-5.1/src/lualib.h +lua-5.1/src/lundump.c +lua-5.1/src/lundump.h +lua-5.1/src/lvm.c +lua-5.1/src/lvm.h +lua-5.1/src/lzio.c +lua-5.1/src/lzio.h +lua-5.1/src/print.c +lua-5.1/test +lua-5.1/test/README +lua-5.1/test/bisect.lua +lua-5.1/test/cf.lua +lua-5.1/test/echo.lua +lua-5.1/test/env.lua +lua-5.1/test/factorial.lua +lua-5.1/test/fib.lua +lua-5.1/test/fibfor.lua +lua-5.1/test/globals.lua +lua-5.1/test/hello.lua +lua-5.1/test/life.lua +lua-5.1/test/luac.lua +lua-5.1/test/printf.lua +lua-5.1/test/readonly.lua +lua-5.1/test/sieve.lua +lua-5.1/test/sort.lua +lua-5.1/test/table.lua +lua-5.1/test/trace-calls.lua +lua-5.1/test/trace-globals.lua +lua-5.1/test/xd.lua +END OF MANIFEST diff --git a/deps/lua/README b/deps/lua/README new file mode 100644 index 0000000000000000000000000000000000000000..f082d4046091b1a54c1b7dea41c8db04d8c21b2b --- /dev/null +++ b/deps/lua/README @@ -0,0 +1,37 @@ +README for Lua 5.1 + +See INSTALL for installation instructions. +See HISTORY for a summary of changes since the last released version. + +* What is Lua? + ------------ + Lua is a powerful, light-weight programming language designed for extending + applications. Lua is also frequently used as a general-purpose, stand-alone + language. Lua is free software. + + For complete information, visit Lua's web site at http://www.lua.org/ . + For an executive summary, see http://www.lua.org/about.html . + + Lua has been used in many different projects around the world. + For a short list, see http://www.lua.org/uses.html . + +* Availability + ------------ + Lua is freely available for both academic and commercial purposes. + See COPYRIGHT and http://www.lua.org/license.html for details. + Lua can be downloaded at http://www.lua.org/download.html . + +* Installation + ------------ + Lua is implemented in pure ANSI C, and compiles unmodified in all known + platforms that have an ANSI C compiler. Under Unix, simply typing "make" + should work. See INSTALL for detailed instructions. + +* Origin + ------ + Lua is developed at Lua.org, a laboratory of the Department of Computer + Science of PUC-Rio (the Pontifical Catholic University of Rio de Janeiro + in Brazil). + For more information about the authors, see http://www.lua.org/authors.html . + +(end of README) diff --git a/deps/lua/doc/contents.html b/deps/lua/doc/contents.html new file mode 100644 index 0000000000000000000000000000000000000000..564377c942a1e5e0c0a76c85bbde344c5cca450d --- /dev/null +++ b/deps/lua/doc/contents.html @@ -0,0 +1,405 @@ + + +Lua 5.1 reference manual - contents + + + + + +
+

+Lua +Lua 5.1 Reference Manual +

+ + +Copyright +© 2006 Lua.org, PUC-Rio. All rights reserved. + +
+ +

Contents

+ + +

Quick index

+ + + + + + +
+

Functions

+_G
+_VERSION
+assert
+collectgarbage
+coroutine.create
+coroutine.resume
+coroutine.running
+coroutine.status
+coroutine.wrap
+coroutine.yield
+debug.debug
+debug.getfenv
+debug.gethook
+debug.getinfo
+debug.getlocal
+debug.getmetatable
+debug.getregistry
+debug.getupvalue
+debug.setfenv
+debug.sethook
+debug.setlocal
+debug.setmetatable
+debug.setupvalue
+debug.traceback
+dofile
+error
+file:close
+file:flush
+file:lines
+file:read
+file:seek
+file:setvbuf
+file:write
+getfenv
+getmetatable
+io.close
+io.flush
+io.input
+io.lines
+io.open
+io.output
+io.popen
+io.read
+io.tmpfile
+io.type
+io.write
+ipairs
+load
+loadfile
+loadstring
+math.abs
+math.acos
+math.asin
+math.atan2
+math.atan
+math.ceil
+math.cosh
+math.cos
+math.deg
+math.exp
+math.floor
+math.fmod
+math.frexp
+math.ldexp
+math.log10
+math.log
+math.max
+math.min
+math.modf
+math.pow
+math.rad
+math.random
+math.randomseed
+math.sinh
+math.sin
+math.sqrt
+math.tanh
+math.tan
+module
+next
+os.clock
+os.date
+os.difftime
+os.execute
+os.exit
+os.getenv
+os.remove
+os.rename
+os.setlocale
+os.time
+os.tmpname
+package.cpath
+package.loaded
+package.loadlib
+package.path
+package.preload
+package.seeall
+pairs
+pcall
+print
+rawequal
+rawget
+rawset
+require
+select
+setfenv
+setmetatable
+string.byte
+string.char
+string.dump
+string.find
+string.format
+string.gmatch
+string.gsub
+string.len
+string.lower
+string.match
+string.rep
+string.reverse
+string.sub
+string.upper
+table.concat
+table.insert
+table.maxn
+table.remove
+table.sort
+tonumber
+tostring
+type
+unpack
+xpcall
+ +
+

API

+lua_Alloc
+lua_CFunction
+lua_Debug
+lua_Hook
+lua_Integer
+lua_Number
+lua_Reader
+lua_State
+lua_Writer
+lua_atpanic
+lua_call
+lua_checkstack
+lua_close
+lua_concat
+lua_cpcall
+lua_createtable
+lua_dump
+lua_equal
+lua_error
+lua_gc
+lua_getallocf
+lua_getfenv
+lua_getfield
+lua_getglobal
+lua_gethook
+lua_gethookcount
+lua_gethookmask
+lua_getinfo
+lua_getlocal
+lua_getmetatable
+lua_getstack
+lua_gettable
+lua_gettop
+lua_getupvalue
+lua_insert
+lua_isboolean
+lua_iscfunction
+lua_isfunction
+lua_islightuserdata
+lua_isnil
+lua_isnumber
+lua_isstring
+lua_istable
+lua_isthread
+lua_isuserdata
+lua_lessthan
+lua_load
+lua_newstate
+lua_newtable
+lua_newthread
+lua_newuserdata
+lua_next
+lua_objlen
+lua_pcall
+lua_pop
+lua_pushboolean
+lua_pushcclosure
+lua_pushcfunction
+lua_pushfstring
+lua_pushinteger
+lua_pushlightuserdata
+lua_pushlstring
+lua_pushnil
+lua_pushnumber
+lua_pushstring
+lua_pushthread
+lua_pushvalue
+lua_pushvfstring
+lua_rawequal
+lua_rawget
+lua_rawgeti
+lua_rawset
+lua_rawseti
+lua_register
+lua_remove
+lua_replace
+lua_resume
+lua_setallocf
+lua_setfenv
+lua_setfield
+lua_setglobal
+lua_sethook
+lua_setlocal
+lua_setmetatable
+lua_settable
+lua_settop
+lua_setupvalue
+lua_status
+lua_toboolean
+lua_tocfunction
+lua_tointeger
+lua_tolstring
+lua_tonumber
+lua_topointer
+lua_tostring
+lua_tothread
+lua_touserdata
+lua_type
+lua_typename
+lua_xmove
+lua_yield
+ +
+

Auxiliary library

+luaL_Buffer
+luaL_Reg
+luaL_addchar
+luaL_addlstring
+luaL_addsize
+luaL_addstring
+luaL_addvalue
+luaL_argcheck
+luaL_argerror
+luaL_buffinit
+luaL_callmeta
+luaL_checkany
+luaL_checkint
+luaL_checkinteger
+luaL_checklong
+luaL_checklstring
+luaL_checknumber
+luaL_checkoption
+luaL_checkstack
+luaL_checkstring
+luaL_checktype
+luaL_checkudata
+luaL_error
+luaL_getmetafield
+luaL_getmetatable
+luaL_gsub
+luaL_loadbuffer
+luaL_loadfile
+luaL_loadstring
+luaL_newmetatable
+luaL_newstate
+luaL_openlibs
+luaL_optint
+luaL_optinteger
+luaL_optlong
+luaL_optlstring
+luaL_optnumber
+luaL_optstring
+luaL_prepbuffer
+luaL_pushresult
+luaL_ref
+luaL_register
+luaL_typename
+luaL_typerror
+luaL_unref
+luaL_where
+ +
+

+ +


+ +Last update: +Fri Feb 10 17:15:37 BRST 2006 + + + + diff --git a/deps/lua/doc/logo.gif b/deps/lua/doc/logo.gif new file mode 100644 index 0000000000000000000000000000000000000000..2f5e4ac2e742fbb7675e739879211553758aea9c Binary files /dev/null and b/deps/lua/doc/logo.gif differ diff --git a/deps/lua/doc/lua.1 b/deps/lua/doc/lua.1 new file mode 100644 index 0000000000000000000000000000000000000000..24809cc6c10e47660e09f5d0129795dc8c870404 --- /dev/null +++ b/deps/lua/doc/lua.1 @@ -0,0 +1,163 @@ +.\" $Id: lua.man,v 1.11 2006/01/06 16:03:34 lhf Exp $ +.TH LUA 1 "$Date: 2006/01/06 16:03:34 $" +.SH NAME +lua \- Lua interpreter +.SH SYNOPSIS +.B lua +[ +.I options +] +[ +.I script +[ +.I args +] +] +.SH DESCRIPTION +.B lua +is the stand-alone Lua interpreter. +It loads and executes Lua programs, +either in textual source form or +in precompiled binary form. +(Precompiled binaries are output by +.BR luac , +the Lua compiler.) +.B lua +can be used as a batch interpreter and also interactively. +.LP +The given +.I options +(see below) +are executed and then +the Lua program in file +.I script +is loaded and executed. +The given +.I args +are available to +.I script +as strings in a global table named +.BR arg . +If these arguments contain spaces or other characters special to the shell, +then they should be quoted +(but note that the quotes will be removed by the shell). +The arguments in +.B arg +start at 0, +which contains the string +.RI ' script '. +The index of the last argument is stored in +.BR arg.n . +The arguments given in the command line before +.IR script , +including the name of the interpreter, +are available in negative indices in +.BR arg . +.LP +At the very start, +before even handling the command line, +.B lua +executes the contents of the environment variable +.BR LUA_INIT , +if it is defined. +If the value of +.B LUA_INIT +is of the form +.RI '@ filename ', +then +.I filename +is executed. +Otherwise, the string is assumed to be a Lua statement and is executed. +.LP +Options start with +.B '\-' +and are described below. +You can use +.B "'\--'" +to signal the end of options. +.LP +If no arguments are given, +then +.B "\-v \-i" +is assumed when the standard input is a terminal; +otherwise, +.B "\-" +is assumed. +.LP +In interactive mode, +.B lua +prompts the user, +reads lines from the standard input, +and executes them as they are read. +If a line does not contain a complete statement, +then a secondary prompt is displayed and +lines are read until a complete statement is formed or +a syntax error is found. +So, one way to interrupt the reading of an incomplete statement is +to force a syntax error: +adding a +.B ';' +in the middle of a statement is a sure way of forcing a syntax error +(except inside multiline strings and comments; these must be closed explicitly). +If a line starts with +.BR '=' , +then +.B lua +displays the values of all the expressions in the remainder of the +line. The expressions must be separated by commas. +The primary prompt is the value of the global variable +.BR _PROMPT , +if this value is a string; +otherwise, the default prompt is used. +Similarly, the secondary prompt is the value of the global variable +.BR _PROMPT2 . +So, +to change the prompts, +set the corresponding variable to a string of your choice. +You can do that after calling the interpreter +or on the command line +(but in this case you have to be careful with quotes +if the prompt string contains a space; otherwise you may confuse the shell.) 
+The default prompts are "> " and ">> ". +.SH OPTIONS +.TP +.B \- +load and execute the standard input as a file, +that is, +not interactively, +even when the standard input is a terminal. +.TP +.BI \-e " stat" +execute statement +.IR stat . +You need to quote +.I stat +if it contains spaces, quotes, +or other characters special to the shell. +.TP +.B \-i +enter interactive mode after +.I script +is executed. +.TP +.BI \-l " name" +call +.BI require(' name ') +before executing +.IR script . +Typically used to load libraries. +.TP +.B \-v +show version information. +.SH "SEE ALSO" +.BR luac (1) +.br +http://www.lua.org/ +.SH DIAGNOSTICS +Error messages should be self explanatory. +.SH AUTHORS +R. Ierusalimschy, +L. H. de Figueiredo, +and +W. Celes +.\" EOF diff --git a/deps/lua/doc/lua.css b/deps/lua/doc/lua.css new file mode 100644 index 0000000000000000000000000000000000000000..90f62312d0e1e33b9e7e59546649cb619ffa4d6f --- /dev/null +++ b/deps/lua/doc/lua.css @@ -0,0 +1,15 @@ +body { + color: #000000 ; + background-color: #FFFFFF ; + font-family: sans-serif ; +} + +a:link { + color: #000080 ; +} + +a:link:hover, a:visited:hover { + color: #000080 ; + background-color: #E0E0FF ; +} + diff --git a/deps/lua/doc/lua.html b/deps/lua/doc/lua.html new file mode 100644 index 0000000000000000000000000000000000000000..1d435ab029c3a41a49d804bb245a294c982511ce --- /dev/null +++ b/deps/lua/doc/lua.html @@ -0,0 +1,172 @@ + + + +LUA man page + + + + + +

NAME

+lua - Lua interpreter +

SYNOPSIS

+lua +[ +options +] +[ +script +[ +args +] +] +

DESCRIPTION

+lua +is the stand-alone Lua interpreter. +It loads and executes Lua programs, +either in textual source form or +in precompiled binary form. +(Precompiled binaries are output by +luac, +the Lua compiler.) +lua +can be used as a batch interpreter and also interactively. +

+The given +options +(see below) +are executed and then +the Lua program in file +script +is loaded and executed. +The given +args +are available to +script +as strings in a global table named +arg. +If these arguments contain spaces or other characters special to the shell, +then they should be quoted +(but note that the quotes will be removed by the shell). +The arguments in +arg +start at 0, +which contains the string +'script'. +The index of the last argument is stored in +arg.n. +The arguments given in the command line before +script, +including the name of the interpreter, +are available in negative indices in +arg. +

+At the very start, +before even handling the command line, +lua +executes the contents of the environment variable +LUA_INIT, +if it is defined. +If the value of +LUA_INIT +is of the form +'@filename', +then +filename +is executed. +Otherwise, the string is assumed to be a Lua statement and is executed. +

+Options start with +'-' +and are described below. +You can use +'--' +to signal the end of options. +

+If no arguments are given, +then +"-v -i" +is assumed when the standard input is a terminal; +otherwise, +"-" +is assumed. +

+In interactive mode, +lua +prompts the user, +reads lines from the standard input, +and executes them as they are read. +If a line does not contain a complete statement, +then a secondary prompt is displayed and +lines are read until a complete statement is formed or +a syntax error is found. +So, one way to interrupt the reading of an incomplete statement is +to force a syntax error: +adding a +';' +in the middle of a statement is a sure way of forcing a syntax error +(except inside multiline strings and comments; these must be closed explicitly). +If a line starts with +'=', +then +lua +displays the values of all the expressions in the remainder of the +line. The expressions must be separated by commas. +The primary prompt is the value of the global variable +_PROMPT, +if this value is a string; +otherwise, the default prompt is used. +Similarly, the secondary prompt is the value of the global variable +_PROMPT2. +So, +to change the prompts, +set the corresponding variable to a string of your choice. +You can do that after calling the interpreter +or on the command line +(but in this case you have to be careful with quotes +if the prompt string contains a space; otherwise you may confuse the shell.) +The default prompts are "> " and ">> ". +

OPTIONS

+

+- +load and execute the standard input as a file, +that is, +not interactively, +even when the standard input is a terminal. +

+-e stat +execute statement +stat. +You need to quote +stat +if it contains spaces, quotes, +or other characters special to the shell. +

+-i +enter interactive mode after +script +is executed. +

+-l name +call +require('name') +before executing +script. +Typically used to load libraries. +

+-v +show version information. +

SEE ALSO

+luac(1) +
+http://www.lua.org/ +

DIAGNOSTICS

+Error messages should be self explanatory. +

AUTHORS

+R. Ierusalimschy, +L. H. de Figueiredo, +and +W. Celes + + + diff --git a/deps/lua/doc/luac.1 b/deps/lua/doc/luac.1 new file mode 100644 index 0000000000000000000000000000000000000000..d8146782df78a432f0da341b11da0e72e6cec31d --- /dev/null +++ b/deps/lua/doc/luac.1 @@ -0,0 +1,136 @@ +.\" $Id: luac.man,v 1.28 2006/01/06 16:03:34 lhf Exp $ +.TH LUAC 1 "$Date: 2006/01/06 16:03:34 $" +.SH NAME +luac \- Lua compiler +.SH SYNOPSIS +.B luac +[ +.I options +] [ +.I filenames +] +.SH DESCRIPTION +.B luac +is the Lua compiler. +It translates programs written in the Lua programming language +into binary files that can be later loaded and executed. +.LP +The main advantages of precompiling chunks are: +faster loading, +protecting source code from accidental user changes, +and +off-line syntax checking. +.LP +Pre-compiling does not imply faster execution +because in Lua chunks are always compiled into bytecodes before being executed. +.B luac +simply allows those bytecodes to be saved in a file for later execution. +.LP +Pre-compiled chunks are not necessarily smaller than the corresponding source. +The main goal in pre-compiling is faster loading. +.LP +The binary files created by +.B luac +are portable only among architectures with the same word size and byte order. +.LP +.B luac +produces a single output file containing the bytecodes +for all source files given. +By default, +the output file is named +.BR luac.out , +but you can change this with the +.B \-o +option. +.LP +In the command line, +you can mix +text files containing Lua source and +binary files containing precompiled chunks. +This is useful to combine several precompiled chunks, +even from different (but compatible) platforms, +into a single precompiled chunk. +.LP +You can use +.B "'\-'" +to indicate the standard input as a source file +and +.B "'\--'" +to signal the end of options +(that is, +all remaining arguments will be treated as files even if they start with +.BR "'\-'" ). +.LP +The internal format of the binary files produced by +.B luac +is likely to change when a new version of Lua is released. +So, +save the source files of all Lua programs that you precompile. +.LP +.SH OPTIONS +Options must be separate. +.TP +.B \-l +produce a listing of the compiled bytecode for Lua's virtual machine. +Listing bytecodes is useful to learn about Lua's virtual machine. +If no files are given, then +.B luac +loads +.B luac.out +and lists its contents. +.TP +.BI \-o " file" +output to +.IR file , +instead of the default +.BR luac.out . +(You can use +.B "'\-'" +for standard output, +but not on platforms that open standard output in text mode.) +The output file may be a source file because +all files are loaded before the output file is written. +Be careful not to overwrite precious files. +.TP +.B \-p +load files but do not generate any output file. +Used mainly for syntax checking and for testing precompiled chunks: +corrupted files will probably generate errors when loaded. +Lua always performs a thorough integrity test on precompiled chunks. +Bytecode that passes this test is completely safe, +in the sense that it will not break the interpreter. +However, +there is no guarantee that such code does anything sensible. +(None can be given, because the halting problem is unsolvable.) +If no files are given, then +.B luac +loads +.B luac.out +and tests its contents. +No messages are displayed if the file passes the integrity test. +.TP +.B \-s +strip debug information before writing the output file. 
+This saves some space in very large chunks, +but if errors occur when running a stripped chunk, +then the error messages may not contain the full information they usually do. +For instance, +line numbers and names of local variables are lost. +.TP +.B \-v +show version information. +.SH FILES +.TP 15 +.B luac.out +default output file +.SH "SEE ALSO" +.BR lua (1) +.br +http://www.lua.org/ +.SH DIAGNOSTICS +Error messages should be self explanatory. +.SH AUTHORS +L. H. de Figueiredo, +R. Ierusalimschy and +W. Celes +.\" EOF diff --git a/deps/lua/doc/luac.html b/deps/lua/doc/luac.html new file mode 100644 index 0000000000000000000000000000000000000000..179ffe82886cb900e11105b28a3040f0e2778c1f --- /dev/null +++ b/deps/lua/doc/luac.html @@ -0,0 +1,145 @@ + + + +LUAC man page + + + + + +

NAME

+luac - Lua compiler +

SYNOPSIS

+luac +[ +options +] [ +filenames +] +

DESCRIPTION

+luac +is the Lua compiler. +It translates programs written in the Lua programming language +into binary files that can be later loaded and executed. +

+The main advantages of precompiling chunks are: +faster loading, +protecting source code from accidental user changes, +and +off-line syntax checking. +

+Precompiling does not imply faster execution +because in Lua chunks are always compiled into bytecodes before being executed. +luac +simply allows those bytecodes to be saved in a file for later execution. +

+Precompiled chunks are not necessarily smaller than the corresponding source. +The main goal in precompiling is faster loading. +

+The binary files created by +luac +are portable only among architectures with the same word size and byte order. +

+luac +produces a single output file containing the bytecodes +for all source files given. +By default, +the output file is named +luac.out, +but you can change this with the +-o +option. +

+In the command line, +you can mix +text files containing Lua source and +binary files containing precompiled chunks. +This is useful because several precompiled chunks, +even from different (but compatible) platforms, +can be combined into a single precompiled chunk. +

+You can use +'-' +to indicate the standard input as a source file +and +'--' +to signal the end of options +(that is, +all remaining arguments will be treated as files even if they start with +'-'). +

+The internal format of the binary files produced by +luac +is likely to change when a new version of Lua is released. +So, +save the source files of all Lua programs that you precompile. +

+

OPTIONS

+Options must be separate. +

+-l +produce a listing of the compiled bytecode for Lua's virtual machine. +Listing bytecodes is useful to learn about Lua's virtual machine. +If no files are given, then +luac +loads +luac.out +and lists its contents. +

+-o file +output to +file, +instead of the default +luac.out. +(You can use +'-' +for standard output, +but not on platforms that open standard output in text mode.) +The output file may be a source file because +all files are loaded before the output file is written. +Be careful not to overwrite precious files. +

+-p +load files but do not generate any output file. +Used mainly for syntax checking and for testing precompiled chunks: +corrupted files will probably generate errors when loaded. +Lua always performs a thorough integrity test on precompiled chunks. +Bytecode that passes this test is completely safe, +in the sense that it will not break the interpreter. +However, +there is no guarantee that such code does anything sensible. +(None can be given, because the halting problem is unsolvable.) +If no files are given, then +luac +loads +luac.out +and tests its contents. +No messages are displayed if the file passes the integrity test. +

+-s +strip debug information before writing the output file. +This saves some space in very large chunks, +but if errors occur when running a stripped chunk, +then the error messages may not contain the full information they usually do. +For instance, +line numbers and names of local variables are lost. +

+-v +show version information. +

FILES

+

+luac.out +default output file +

SEE ALSO

+lua(1) +
+http://www.lua.org/ +

DIAGNOSTICS

+Error messages should be self explanatory. +

AUTHORS

+L. H. de Figueiredo, +R. Ierusalimschy and +W. Celes + + + diff --git a/deps/lua/doc/manual.html b/deps/lua/doc/manual.html new file mode 100644 index 0000000000000000000000000000000000000000..a44d2e8ea3ce362158221e27893317f4ced14ddd --- /dev/null +++ b/deps/lua/doc/manual.html @@ -0,0 +1,6247 @@ + + + + +Lua 5.1 Reference Manual + + + + + +
+

+[Lua logo] +Lua 5.1 Reference Manual +

+ +by Roberto Ierusalimschy, Luiz Henrique de Figueiredo, Waldemar Celes +

+ +Copyright +© 2006 Lua.org, PUC-Rio. All rights reserved. + +


+ +

+

+ + + +

1 - Introduction

+ +

Lua is an extension programming language designed to support +general procedural programming with data description +facilities. +It also offers good support for object-oriented programming, +functional programming, and data-driven programming. +Lua is intended to be used as a powerful, light-weight +scripting language for any program that needs one. +Lua is implemented as a library, written in clean C +(that is, in the common subset of ANSI C and C++). + +

Being an extension language, Lua has no notion of a "main" program: +it only works embedded in a host client, +called the embedding program or simply the host. +This host program can invoke functions to execute a piece of Lua code, +can write and read Lua variables, +and can register C functions to be called by Lua code. +Through the use of C functions, Lua can be augmented to cope with +a wide range of different domains, +thus creating customized programming languages sharing a syntactical framework. +The Lua distribution includes a sample host program called lua, +which uses the Lua library to offer a complete, stand-alone Lua interpreter. + +

Lua is free software, +and is provided as usual with no guarantees, +as stated in its license. +The implementation described in this manual is available +at Lua's official web site, www.lua.org. + +

Like any other reference manual, +this document is dry in places. +For a discussion of the decisions behind the design of Lua, +see the technical papers available at Lua's web site. +For a detailed introduction to programming in Lua, +see Roberto's book, Programming in Lua. + +

+

2 - The Language

+ +

This section describes the lexis, the syntax, and the semantics of Lua. +In other words, +this section describes +which tokens are valid, +how they can be combined, +and what their combinations mean. + +

The language constructs will be explained using the usual extended BNF notation, +in which +{a} means 0 or more a's, and +[a] means an optional a. +Non-terminals are shown in italics, +keywords are shown in bold, +and other terminal symbols are shown in typewriter font, +enclosed in single quotes. +The complete syntax of Lua can be found at the end of this manual. + +

2.1 - Lexical Conventions

+ +

Names +(also called identifiers) +in Lua can be any string of letters, +digits, and underscores, +not beginning with a digit. +This coincides with the definition of names in most languages. +(The definition of letter depends on the current locale: +any character considered alphabetic by the current locale +can be used in an identifier.) +Identifiers are used to name variables and table fields. + +

The following keywords are reserved +and cannot be used as names: + +

+       and       break     do        else      elseif
+       end       false     for       function  if
+       in        local     nil       not       or
+       repeat    return    then      true      until     while
+
+ +

Lua is a case-sensitive language: +and is a reserved word, but And and AND +are two different, valid names. +As a convention, names starting with an underscore followed by +uppercase letters (such as _VERSION) +are reserved for internal global variables used by Lua. + +

The following strings denote other tokens: +

+       +     -     *     /     %     ^     #
+       ==    ~=    <=    >=    <     >     =
+       (     )     {     }     [     ]
+       ;     :     ,     .     ..    ...
+
+ +

Literal strings
+can be delimited by matching single or double quotes,
+and can contain the following C-like escape sequences:
+`\a´ (bell),
+`\b´ (backspace),
+`\f´ (form feed),
+`\n´ (newline),
+`\r´ (carriage return),
+`\t´ (horizontal tab),
+`\v´ (vertical tab),
+`\\´ (backslash),
+`\"´ (quotation mark),
+and `\'´ (apostrophe).
+

+Moreover, a `\newline´ +(that is, a backslash followed by a real newline) +results in a newline in the string. +A character in a string may also be specified by its numerical value +using the escape sequence `\ddd´, +where ddd is a sequence of up to three decimal digits. +(Note that if a numerical escape is to be followed by a digit, +it must be expressed using exactly three digits.) +Strings in Lua may contain any 8-bit value, including embedded zeros, +which can be specified as `\0´. + +

To put a double (single) quote, a newline, a backslash, +or an embedded zero +inside a literal string enclosed by double (single) quotes +you must use an escape sequence. +Any other character may be directly inserted into the literal. +(Some control characters may cause problems for the file system, +but Lua has no problem with them.) + +

Literal strings can also be defined using a long format +enclosed by long brackets. +We define an opening long bracket of level n as an opening +square bracket followed by n equal signs followed by another +opening square bracket. +So, an opening long bracket of level 0 is written as [[, +an opening long bracket of level 1 is written as [=[, +and so on. +A closing long bracket is defined similarly; +for instance, a closing long bracket of level 4 is written as ]====]. +A long string starts with an opening long bracket of any level and +ends at the first closing long bracket of the same level. +Literals in this bracketed form may run for several lines, +do not interpret any escape sequences, +and ignore long brackets of any other level. +They may contain anything except a closing bracket of the proper level +or embedded zeros. + +

For convenience,
+when the opening long bracket is immediately followed by a newline,
+the newline is not included in the string.
+As an example, in a system using ASCII
+(in which `a´ is coded as 97,
+newline is coded as 10, and `1´ is coded as 49),
+the five literals below denote the same string:
+

+      (1)   'alo\n123"'
+      (2)   "alo\n123\""
+      (3)   '\97lo\10\04923"'
+      (4)   [[alo
+            123"]]
+      (5)   [==[
+            alo
+            123"]==]
+
+ +

Numerical constants may be written with an optional decimal part +and an optional decimal exponent. +Lua also accepts integer hexadecimal constants, +by prefixing them with 0x. +Examples of valid numerical constants are +

+       3       3.0     3.1416  314.16e-2   0.31416E1  0xff  0x56
+
+ +

Comments start with a double hyphen (--) +anywhere outside a string. +If the text immediately after -- is not an opening long bracket, +the comment is a short comment, +which runs until the end of the line. +Otherwise, it is a long comment, +which runs until the corresponding closing long bracket. +Long comments are frequently used to disable code temporarily. + +
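+For illustration, a small chunk mixing both comment forms
+(the output shown is what a stock interpreter would print):
+
+       -- a short comment, running to the end of the line
+       --[[ a long comment: everything up to the matching
+            closing long bracket is ignored, which makes it
+            handy for disabling code temporarily ]]
+       print("still executed")     --> still executed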

2.2 - Values and Types

+ +

Lua is a dynamically typed language. +This means that +variables do not have types; only values do. +There are no type definitions in the language. +All values carry their own type. + +

All values in Lua are first-class values. +This means that all values can be stored in variables, +passed as arguments to other functions, and returned as results. + +

There are eight basic types in Lua: +nil, boolean, number, +string, function, userdata, +thread, and table. +Nil is the type of the value nil, +whose main property is to be different from any other value; +it usually represents the absence of a useful value. +Boolean is the type of the values false and true. +Both nil and false make a condition false; +any other value makes it true. +Number represents real (double-precision floating-point) numbers. +(It is easy to build Lua interpreters that use other +internal representations for numbers, +such as single-precision float or long integers. +See file luaconf.h.) +String represents arrays of characters. + +Lua is 8-bit clean: +Strings may contain any 8-bit character, +including embedded zeros (`\0´) (see 2.1). + +

Lua can call (and manipulate) functions written in Lua and +functions written in C +(see 2.5.8). + +

The type userdata is provided to allow arbitrary C data to +be stored in Lua variables. +This type corresponds to a block of raw memory +and has no pre-defined operations in Lua, +except assignment and identity test. +However, by using metatables, +the programmer can define operations for userdata values +(see 2.8). +Userdata values cannot be created or modified in Lua, +only through the C API. +This guarantees the integrity of data owned by the host program. + +

The type thread represents independent threads of execution +and it is used to implement coroutines (see 2.11). +Do not confuse Lua threads with operating-system threads. +Lua supports coroutines on all systems, +even those that do not support threads. + +

The type table implements associative arrays, +that is, arrays that can be indexed not only with numbers, +but with any value (except nil). +Tables can be heterogeneous; +that is, they can contain values of all types (except nil). +Tables are the sole data structuring mechanism in Lua; +they may be used to represent ordinary arrays, +symbol tables, sets, records, graphs, trees, etc. +To represent records, Lua uses the field name as an index. +The language supports this representation by +providing a.name as syntactic sugar for a["name"]. +There are several convenient ways to create tables in Lua +(see 2.5.7). + +

Like indices, +the value of a table field can be of any type (except nil). +In particular, +because functions are first-class values, +table fields may contain functions. +Thus tables may also carry methods (see 2.5.9). + +

Tables, functions, threads, and (full) userdata values are objects: +variables do not actually contain these values, +only references to them. +Assignment, parameter passing, and function returns +always manipulate references to such values; +these operations do not imply any kind of copy. + +

The library function type returns a string describing the type +of a given value. + +
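+As a small illustration of dynamic typing, the following chunk
+prints the type names of a few values:
+
+       print(type(nil))          --> nil
+       print(type(true))         --> boolean
+       print(type(10.4 * 3))     --> number
+       print(type("hello"))      --> string
+       print(type(print))        --> function
+       print(type({}))           --> table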

2.2.1 - Coercion

+ +

Lua provides automatic conversion between +string and number values at run time. +Any arithmetic operation applied to a string tries to convert +this string to a number, following the usual conversion rules. +Conversely, whenever a number is used where a string is expected, +the number is converted to a string, in a reasonable format. +For complete control over how numbers are converted to strings, +use the format function from the string library +(see string.format). + +
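+A few illustrative evaluations of these conversions:
+
+       print("10" + 1)                        --> 11
+       print("10" + 1 == 11)                  --> true
+       print(10 .. 20)                        --> 1020
+       print(string.format("%.4f", math.pi))  --> 3.1416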

2.3 - Variables

+ +

Variables are places that store values. + +There are three kinds of variables in Lua: +global variables, local variables, and table fields. + +

A single name can denote a global variable or a local variable +(or a function's formal parameter, +which is a particular kind of local variable): +

+	var ::= Name
+
+Name denotes identifiers, as defined in (see 2.1). + +

Variables are assumed to be global unless explicitly declared local +(see 2.4.7). +Local variables are lexically scoped: +Local variables can be freely accessed by functions +defined inside their scope (see 2.6). + +

Before the first assignment to a variable, its value is nil. + +

Square brackets are used to index a table: +

+	var ::= prefixexp `[´ exp `]´
+
+The first expression (prefixexp) should result in a table value; +the second expression (exp) +identifies a specific entry in this table. +The expression denoting the table to be indexed has a restricted syntax; +see 2.5 for details. + +

The syntax var.Name is just syntactic sugar for +var["Name"] and is used to denote table fields: +

+	var ::= prefixexp `.´ Name
+
+ +

The meaning of accesses to global variables +and table fields can be changed via metatables. +An access to an indexed variable t[i] is equivalent to +a call gettable_event(t,i). +(See 2.8 for a complete description of the +gettable_event function. +This function is not defined or callable in Lua. +We use it here only for explanatory purposes.) + +

All global variables live as fields in ordinary Lua tables, +called environment tables or simply +environments (see 2.9). +Each function has its own reference to an environment, +so that all global variables in this function +will refer to this environment table. +When a function is created, +it inherits the environment from the function that created it. +To get the environment table of a Lua function, +you call getfenv. +To replace it, +you call setfenv. +(You can only manipulate the environment of C functions +through the debug library; (see 5.9).) + +

An access to a global variable x +is equivalent to _env.x, +which in turn is equivalent to +

+       gettable_event(_env, "x")
+
+where _env is the environment of the running function. +(See 2.8 for a complete description of the +gettable_event function. +This function is not defined or callable in Lua. +Similarly, the _env variable is not defined in Lua. +We use them here only for explanatory purposes.) + +

2.4 - Statements

+ +

Lua supports an almost conventional set of statements, +similar to those in Pascal or C. +This set includes +assignment, control structures, function calls, +table constructors, and variable declarations. + +

2.4.1 - Chunks

+ +

The unit of execution of Lua is called a chunk. +A chunk is simply a sequence of statements, +which are executed sequentially. +Each statement can be optionally followed by a semicolon: +

+	chunk ::= {stat [`;´]}
+
+There are no empty statements and thus `;;´ is not legal. + +

Lua handles a chunk as the body of an anonymous function +with a variable number of arguments +(see 2.5.9). +As such, chunks can define local variables, +receive arguments, and return values. + +

A chunk may be stored in a file or in a string inside the host program. +When a chunk is executed, first it is pre-compiled into instructions for +a virtual machine, +and then the compiled code is executed +by an interpreter for the virtual machine. + +

Chunks may also be pre-compiled into binary form; +see program luac for details. +Programs in source and compiled forms are interchangeable; +Lua automatically detects the file type and acts accordingly. + + +

2.4.2 - Blocks

+A block is a list of statements; +syntactically, a block is the same as a chunk: +
+	block ::= chunk
+
+ +

A block may be explicitly delimited to produce a single statement: +

+	stat ::= do block end
+
+Explicit blocks are useful +to control the scope of variable declarations. +Explicit blocks are also sometimes used to +add a return or break statement in the middle +of another block (see 2.4.4). + + +

2.4.3 - Assignment

+ +

Lua allows multiple assignment. +Therefore, the syntax for assignment +defines a list of variables on the left side +and a list of expressions on the right side. +The elements in both lists are separated by commas: +

+	stat ::= varlist1 `=´ explist1
+	varlist1 ::= var {`,´ var}
+	explist1 ::= exp {`,´ exp}
+
+Expressions are discussed in 2.5. + +

Before the assignment, +the list of values is adjusted to the length of +the list of variables. +If there are more values than needed, +the excess values are thrown away. +If there are fewer values than needed, +the list is extended with as many nil's as needed. +If the list of expressions ends with a function call, +then all values returned by this call enter in the list of values, +before the adjustment +(except when the call is enclosed in parentheses; see 2.5). + +

The assignment statement first evaluates all its expressions +and only then are the assignments performed. +Thus the code +

+       i = 3
+       i, a[i] = i+1, 20
+
+sets a[3] to 20, without affecting a[4] +because the i in a[i] is evaluated (to 3) +before it is assigned 4. +Similarly, the line +
+       x, y = y, x
+
+exchanges the values of x and y. + +

The meaning of assignments to global variables +and table fields can be changed via metatables. +An assignment to an indexed variable t[i] = val is equivalent to +settable_event(t,i,val). +(See 2.8 for a complete description of the +settable_event function. +This function is not defined or callable in Lua. +We use it here only for explanatory purposes.) + +

An assignment to a global variable x = val +is equivalent to the assignment +_env.x = val, +which in turn is equivalent to +

+       settable_event(_env, "x", val)
+
+where _env is the environment of the running function. +(The _env variable is not defined in Lua. +We use it here only for explanatory purposes.) + +

2.4.4 - Control Structures

+The control structures +if, while, and repeat have the usual meaning and +familiar syntax: + + + +
+	stat ::= while exp do block end
+	stat ::= repeat block until exp
+	stat ::= if exp then block {elseif exp then block} [else block] end
+
+Lua also has a for statement, in two flavors (see 2.4.5). + +

The condition expression of a +control structure may return any value. +Both false and nil are considered false. +All values different from nil and false are considered true +(in particular, the number 0 and the empty string are also true). + +

In the repeat--until loop, +the inner block does not end at the until keyword, +but only after the condition. +So, the condition can refer to local variables +declared inside the loop block. + +
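+For example, the following loop is valid because the local
+line is still in scope when the condition is evaluated:
+
+       repeat
+         local line = io.read()
+       until line == nil or line == ""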

The return statement is used to return values +from a function or a chunk (which is just a function). + +Functions and chunks may return more than one value, +so the syntax for the return statement is +

+	stat ::= return [explist1]
+
+ +

The break statement is used to terminate the execution of a +while, repeat, or for loop, +skipping to the next statement after the loop: + +

+	stat ::= break
+
+A break ends the innermost enclosing loop. + +

The return and break +statements can only be written as the last statement of a block. +If it is really necessary to return or break in the +middle of a block, +then an explicit inner block can be used, +as in the idioms +`do return end´ and +`do break end´, +because now return and break are the last statements in +their (inner) blocks. + +

2.4.5 - For Statement

+ +

The for statement has two forms: +one numeric and one generic. + + +

The numeric for loop repeats a block of code while a +control variable runs through an arithmetic progression. +It has the following syntax: +

+	stat ::= for Name `=´ exp `,´ exp [`,´ exp] do block end
+
+The block is repeated for name starting at the value of +the first exp, until it passes the second exp by steps of the +third exp. +More precisely, a for statement like +
+       for var = e1, e2, e3 do block end
+
+is equivalent to the code: +
+       do
+         local _var, _limit, _step = tonumber(e1), tonumber(e2), tonumber(e3)
+         if not (_var and _limit and _step) then error() end
+         while (_step>0 and _var<=_limit) or (_step<=0 and _var>=_limit) do
+           local var = _var
+           block
+           _var = _var + _step
+         end
+       end
+
+Note the following:
+all three control expressions are evaluated only once,
+before the loop starts,
+and they must all result in numbers;
+if the third expression (the step) is absent,
+then a step of 1 is used;
+you can use break to exit a for loop;
+and the loop variable var is local to the loop,
+so if you need its value after the loop,
+assign it to another variable before exiting.
+

The generic for statement works over functions, +called iterators. +On each iteration, the iterator function is called to produce a new value, +stopping when this new value is nil. +The generic for loop has the following syntax: +

+	stat ::= for namelist in explist1 do block end
+	namelist ::= Name {`,´ Name}
+
+A for statement like +
+       for var_1, ..., var_n in explist do block end
+
+is equivalent to the code: +
+       do
+         local _f, _s, _var = explist
+         while true do
+           local var_1, ... , var_n = _f(_s, _var)
+           _var = var_1
+           if _var == nil then break end
+           block
+         end
+       end
+
+Note the following:
+explist is evaluated only once,
+and its results are an iterator function, a state,
+and an initial value for the first iterator variable;
+you can use break to exit a for loop;
+and the loop variables var_i are local to the loop,
+so if you need their values after the loop,
+assign them to other variables before exiting.
+
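+A typical use iterates over an array with the standard
+ipairs iterator:
+
+       local days = {"Mon", "Tue", "Wed"}
+       for i, v in ipairs(days) do
+         print(i, v)      --> 1 Mon, then 2 Tue, then 3 Wed
+       end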

2.4.6 - Function Calls as Statements

+To allow possible side-effects, +function calls can be executed as statements: +
+	stat ::= functioncall
+
+In this case, all returned values are thrown away. +Function calls are explained in 2.5.8. + +

2.4.7 - Local Declarations

+Local variables may be declared anywhere inside a block. +The declaration may include an initial assignment: +
+	stat ::= local namelist [`=´ explist1]
+
+If present, an initial assignment has the same semantics +of a multiple assignment (see 2.4.3). +Otherwise, all variables are initialized with nil. + +

A chunk is also a block (see 2.4.1), +and so local variables can be declared in a chunk outside any explicit block. +The scope of such local variables extends until the end of the chunk. + +

The visibility rules for local variables are explained in 2.6. + +

2.5 - Expressions

+ +

+The basic expressions in Lua are the following: +

+	exp ::= prefixexp
+	exp ::= nil  |  false  |  true
+	exp ::= Number
+	exp ::= String
+	exp ::= function
+	exp ::= tableconstructor
+	exp ::= `...´
+	exp ::= exp binop exp
+	exp ::= unop exp
+	prefixexp ::= var  |  functioncall  |  `(´ exp `)´
+
+ +

Numbers and literal strings are explained in 2.1; +variables are explained in 2.3; +function definitions are explained in 2.5.9; +function calls are explained in 2.5.8; +table constructors are explained in 2.5.7. +Vararg expressions, +denoted by three dots (`...´), can only be used inside +vararg functions; +they are explained in 2.5.9. + + +

Binary operators comprise arithmetic operators (see 2.5.1), +relational operators (see 2.5.2), and logical operators (see 2.5.3). +Unary operators comprise the unary minus (see 2.5.1), +the unary not (see 2.5.3), +and the unary length operator (see 2.5.5). + +

Both function calls and vararg expressions may result in multiple values. +If the expression is used as a statement (see 2.4.6) +(only possible for function calls), +then its return list is adjusted to zero elements, +thus discarding all returned values. +If the expression is used inside another expression +or in the middle of a list of expressions, +then its result list is adjusted to one element, +thus discarding all values except the first one. +If the expression is used as the last element of a list of expressions, +then no adjustment is made, +unless the call is enclosed in parentheses. + +

Here are some examples: +

+       f()                -- adjusted to 0 results
+       g(f(), x)          -- f() is adjusted to 1 result
+       g(x, f())          -- g gets x plus all values returned by f()
+       a,b,c = f(), x     -- f() is adjusted to 1 result (c gets nil)
+       a,b = ...          -- a gets the first vararg parameter, b gets
+                          -- the second (both a and b may get nil if there is
+                          -- no corresponding vararg parameter)
+       a,b,c = x, f()     -- f() is adjusted to 2 results
+       a,b,c = f()        -- f() is adjusted to 3 results
+       return f()         -- returns all values returned by f()
+       return ...         -- returns all received vararg parameters
+       return x,y,f()     -- returns x, y, and all values returned by f()
+       {f()}              -- creates a list with all values returned by f()
+       {...}              -- creates a list with all vararg parameters
+       {f(), nil}         -- f() is adjusted to 1 result
+
+ +

An expression enclosed in parentheses always results in only one value. +Thus, +(f(x,y,z)) is always a single value, +even if f returns several values. +(The value of (f(x,y,z)) is the first value returned by f +or nil if f does not return any values.) + +

2.5.1 - Arithmetic Operators

+Lua supports the usual arithmetic operators: +the binary + (addition), +- (subtraction), * (multiplication), +/ (division), % (modulo), and ^ (exponentiation); +and unary - (negation). +If the operands are numbers, or strings that can be converted to +numbers (see 2.2.1), +then all operations have the usual meaning. +Exponentiation works for any exponent. +For instance, x^(-0.5) computes the inverse of the square root of x. +Modulus is defined as +
+       a % b == a - math.floor(a/b)*b
+
+That is, it is the remainder of a division that rounds +the quotient towards minus infinity. + +
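+In particular, the result of a % b always takes the sign of b
+(or is zero); a few illustrative evaluations:
+
+       print(5 % 3)      --> 2
+       print(-5 % 3)     --> 1
+       print(5 % -3)     --> -1
+       print(2^-2)       --> 0.25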

2.5.2 - Relational Operators

+The relational operators in Lua are +
+       ==    ~=    <     >     <=    >=
+
+These operators always result in false or true. + +

Equality (==) first compares the type of its operands. +If the types are different, then the result is false. +Otherwise, the values of the operands are compared. +Numbers and strings are compared in the usual way. +Objects (tables, userdata, threads, and functions) +are compared by reference: +Two objects are considered equal only if they are the same object. +Every time you create a new object +(a table, userdata, thread, or function), +this new object is different from any previously existing object. + +

You can change the way that Lua compares tables and userdata +by using the "eq" metamethod (see 2.8). + +

The conversion rules of 2.2.1 +do not apply to equality comparisons. +Thus, "0"==0 evaluates to false, +and t[0] and t["0"] denote different +entries in a table. + + +

The operator ~= is exactly the negation of equality (==). + +

The order operators work as follows. +If both arguments are numbers, then they are compared as such. +Otherwise, if both arguments are strings, +then their values are compared according to the current locale. +Otherwise, Lua tries to call the "lt" or the "le" +metamethod (see 2.8). + +

2.5.3 - Logical Operators

+The logical operators in Lua are + +
+       and   or    not
+
+Like the control structures (see 2.4.4), +all logical operators consider both false and nil as false +and anything else as true. + + +

The negation operator not always returns false or true. +The conjunction operator and returns its first argument +if this value is false or nil; +otherwise, and returns its second argument. +The disjunction operator or returns its first argument +if this value is different from nil and false; +otherwise, or returns its second argument. +Both and and or use short-cut evaluation; +that is, +the second operand is evaluated only if necessary. +Here are some examples: +

+       10 or 20            --> 10
+       10 or error()       --> 10
+       nil or "a"          --> "a"
+       nil and 10          --> nil
+       false and error()   --> false
+       false and nil       --> false
+       false or nil        --> nil
+       10 and 20           --> 20
+
+(In this manual, +`-->´ indicates the result of the preceding expression.) + +

2.5.4 - Concatenation

+The string concatenation operator in Lua is +denoted by two dots (`..´). +If both operands are strings or numbers, then they are converted to +strings according to the rules mentioned in 2.2.1. +Otherwise, the "concat" metamethod is called (see 2.8). + +

2.5.5 - The Length Operator

+ +

The length operator is denoted by the unary operator #. +The length of a string is its number of bytes +(that is, the usual meaning of string length when each +character is one byte). + +

The length of a table t is defined to be any +integer index n +such that t[n] is not nil and t[n+1] is nil; +moreover, if t[1] is nil, n may be zero. +For a regular array, with non-nil values from 1 to a given n, +its length is exactly that n, +the index of its last value. +If the array has "holes" +(that is, nil values between other non-nil values), +then #t may be any of the indices that +directly precedes a nil value +(that is, it may consider any such nil value as the end of +the array). + +
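+A few illustrative evaluations:
+
+       print(#"hello")        --> 5
+       local t = {10, 20, 30}
+       print(#t)              --> 3
+       t[2] = nil             -- the array now has a hole,
+       print(#t)              -- so this may print either 1 or 3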

2.5.6 - Precedence

+Operator precedence in Lua follows the table below, +from lower to higher priority: +
+       or
+       and
+       <     >     <=    >=    ~=    ==
+       ..
+       +     -
+       *     /     %
+       not   #     - (unary)
+       ^
+
+As usual, +you can use parentheses to change the precedences of an expression. +The concatenation (`..´) and exponentiation (`^´) +operators are right associative. +All other binary operators are left associative. + +
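+For example, because ^ binds tighter than unary - and is
+right associative, while + binds tighter than ..:
+
+       print(-2^2)          --> -4    (parsed as -(2^2))
+       print(2^3^2)         --> 512   (parsed as 2^(3^2))
+       print(1 + 2 .. 3)    --> 33    (parsed as (1 + 2) .. 3)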

2.5.7 - Table Constructors

+Table constructors are expressions that create tables. +Every time a constructor is evaluated, a new table is created. +Constructors can be used to create empty tables, +or to create a table and initialize some of its fields. +The general syntax for constructors is +
+	tableconstructor ::= `{´ [fieldlist] `}´
+	fieldlist ::= field {fieldsep field} [fieldsep]
+	field ::= `[´ exp `]´ `=´ exp  |  Name `=´ exp  |  exp
+	fieldsep ::= `,´  |  `;´
+
+ +

Each field of the form [exp1] = exp2 adds to the new table an entry +with key exp1 and value exp2. +A field of the form name = exp is equivalent to +["name"] = exp. +Finally, fields of the form exp are equivalent to +[i] = exp, where i are consecutive numerical integers, +starting with 1. +Fields in the other formats do not affect this counting. +For example, +

+       a = { [f(1)] = g; "x", "y"; x = 1, f(x), [30] = 23; 45 }
+
+is equivalent to +
+       do
+         local t = {}
+         t[f(1)] = g
+         t[1] = "x"         -- 1st exp
+         t[2] = "y"         -- 2nd exp
+         t.x = 1            -- t["x"] = 1
+         t[3] = f(x)        -- 3rd exp
+         t[30] = 23
+         t[4] = 45          -- 4th exp
+         a = t
+       end
+
+ +

If the last field in the list has the form exp +and the expression is a function call or a vararg expression, +then all values returned by this expression enter the list consecutively +(see 2.5.8). +To avoid this, +enclose the function call (or the vararg expression) +in parentheses (see 2.5). + +

The field list may have an optional trailing separator, +as a convenience for machine-generated code. + +

2.5.8 - Function Calls

+A function call in Lua has the following syntax: +
+	functioncall ::= prefixexp args
+
+In a function call, +first prefixexp and args are evaluated. +If the value of prefixexp has type function, +then this function is called +with the given arguments. +Otherwise, the prefixexp "call" metamethod is called, +having as first parameter the value of prefixexp, +followed by the original call arguments +(see 2.8). + +

The form +

+	functioncall ::= prefixexp `:´ Name args
+
+can be used to call "methods". +A call v:name(...) +is syntactic sugar for v.name(v,...), +except that v is evaluated only once. + +

Arguments have the following syntax: +

+	args ::= `(´ [explist1] `)´
+	args ::= tableconstructor
+	args ::= String
+
+All argument expressions are evaluated before the call. +A call of the form f{...} is syntactic sugar for f({...}); +that is, the argument list is a single new table. +A call of the form f'...' +(or f"..." or f[[...]]) is syntactic sugar for f('...'); +that is, the argument list is a single literal string. + +
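+For instance, each call below passes a single argument,
+and the last line uses the method-call sugar on a string:
+
+       print "hello"          -- same as print("hello")
+       print [[hello]]        -- same as print("hello")
+       print{ x = 1 }         -- same as print({ x = 1 })
+       local s = "hello"
+       print(s:upper())       -- same as s.upper(s); prints HELLO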

As an exception to the free-format syntax of Lua, +you cannot put a line break before the `(´ in a function call. +This restriction avoids some ambiguities in the language. +If you write +

+       a = f
+       (g).x(a)
+
+Lua would see that as a single statement, a = f(g).x(a). +So, if you want two statements, you must add a semi-colon between them. +If you actually want to call f, +you must remove the line break before (g). + +

A call of the form return functioncall is called +a tail call. +Lua implements proper tail calls +(or proper tail recursion): +In a tail call, +the called function reuses the stack entry of the calling function. +Therefore, there is no limit on the number of nested tail calls that +a program can execute. +However, a tail call erases any debug information about the +calling function. +Note that a tail call only happens with a particular syntax, +where the return has one single function call as argument; +this syntax makes the calling function return exactly +the returns of the called function. +So, none of the following examples are tail calls: +

+       return (f(x))        -- results adjusted to 1
+       return 2 * f(x)
+       return x, f(x)       -- additional results
+       f(x); return         -- results discarded
+       return x or f(x)     -- results adjusted to 1
+
+ +
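+By contrast, the following sketch is a proper tail call and can
+recurse to an arbitrary depth without growing the stack:
+
+       function loop (n)
+         if n == 0 then return "done" end
+         return loop(n - 1)       -- tail call
+       end
+       print(loop(100000))        --> done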

2.5.9 - Function Definitions

+ +

The syntax for function definition is +

+	function ::= function funcbody
+	funcbody ::= `(´ [parlist1] `)´ block end
+
+ +

The following syntactic sugar simplifies function definitions: +

+	stat ::= function funcname funcbody
+	stat ::= local function Name funcbody
+	funcname ::= Name {`.´ Name} [`:´ Name]
+
+The statement +
+       function f () ... end
+
+translates to +
+       f = function () ... end
+
+The statement +
+       function t.a.b.c.f () ... end
+
+translates to +
+       t.a.b.c.f = function () ... end
+
+The statement +
+       local function f () ... end
+
+translates to +
+       local f; f = function () ... end
+
+not this: +
+       local f = function () ... end
+
+(This only makes a difference when the body of the function +contains references to f.) + +

A function definition is an executable expression, +whose value has type function. +When Lua pre-compiles a chunk, +all its function bodies are pre-compiled too. +Then, whenever Lua executes the function definition, +the function is instantiated (or closed). +This function instance (or closure) +is the final value of the expression. +Different instances of the same function +may refer to different external local variables +and may have different environment tables. + +

Parameters act as local variables that are +initialized with the argument values: +

+	parlist1 ::= namelist [`,´ `...´]  |  `...´
+
+When a function is called, +the list of arguments is adjusted to +the length of the list of parameters, +unless the function is a variadic or vararg function, +which is +indicated by three dots (`...´) at the end of its parameter list. +A vararg function does not adjust its argument list; +instead, it collects all extra arguments and supplies them +to the function through a vararg expression, +which is also written as three dots. +The value of this expression is a list of all actual extra arguments, +similar to a function with multiple results. +If a vararg expression is used inside another expression +or in the middle of a list of expressions, +then its return list is adjusted to one element. +If the expression is used as the last element of a list of expressions, +then no adjustment is made +(unless the call is enclosed in parentheses). + +

As an example, consider the following definitions: +

+       function f(a, b) end
+       function g(a, b, ...) end
+       function r() return 1,2,3 end
+
+Then, we have the following mapping from arguments to parameters and +to the vararg expression: +
+       CALL            PARAMETERS
+
+       f(3)             a=3, b=nil
+       f(3, 4)          a=3, b=4
+       f(3, 4, 5)       a=3, b=4
+       f(r(), 10)       a=1, b=10
+       f(r())           a=1, b=2
+
+       g(3)             a=3, b=nil, ... -->  (nothing)
+       g(3, 4)          a=3, b=4,   ... -->  (nothing)
+       g(3, 4, 5, 8)    a=3, b=4,   ... -->  5  8
+       g(5, r())        a=5, b=1,   ... -->  2  3
+
+ +

Results are returned using the return statement (see 2.4.4). +If control reaches the end of a function +without encountering a return statement, +then the function returns with no results. + +

The colon syntax +is used for defining methods, +that is, functions that have an implicit extra parameter self. +Thus, the statement +

+       function t.a.b.c:f (...) ... end
+
+is syntactic sugar for +
+       t.a.b.c.f = function (self, ...) ... end
+
+ +

2.6 - Visibility Rules

+ + +

Lua is a lexically scoped language. +The scope of variables begins at the first statement after +their declaration and lasts until the end of the innermost block that +includes the declaration. +Consider the following example: +

+       x = 10                -- global variable
+       do                    -- new block
+         local x = x         -- new `x', with value 10
+         print(x)            --> 10
+         x = x+1
+         do                  -- another block
+           local x = x+1     -- another `x'
+           print(x)          --> 12
+         end
+         print(x)            --> 11
+       end
+       print(x)              --> 10  (the global one)
+
+ +

Notice that, in a declaration like local x = x, +the new x being declared is not in scope yet, +and so the second x refers to the outside variable. + +

Because of the lexical scoping rules, +local variables can be freely accessed by functions +defined inside their scope. +A local variable used by an inner function is called +an upvalue, or external local variable, +inside the inner function. + +

Notice that each execution of a local statement +defines new local variables. +Consider the following example: +

+       a = {}
+       local x = 20
+       for i=1,10 do
+         local y = 0
+         a[i] = function () y=y+1; return x+y end
+       end
+
+The loop creates ten closures +(that is, ten instances of the anonymous function). +Each of these closures uses a different y variable, +while all of them share the same x. + +

2.7 - Error Handling

+ +

Because Lua is an embedded extension language, +all Lua actions start from C code in the host program +calling a function from the Lua library (see lua_pcall). +Whenever an error occurs during Lua compilation or execution, +control returns to C, +which can take appropriate measures +(such as printing an error message). + +

Lua code can explicitly generate an error by calling the +error function. +If you need to catch errors in Lua, +you can use the pcall function. + +
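+A small illustrative sketch (the error message carries a prefix
+with position information about where the error was raised):
+
+       local ok, err = pcall(function ()
+         error("something went wrong")
+       end)
+       print(ok)     --> false
+       print(err)    -- the message "something went wrong", with a position prefix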

2.8 - Metatables

+ +

Every value in Lua may have a metatable. +This metatable is an ordinary Lua table +that defines the behavior of the original value +under certain special operations. +You can change several aspects of the behavior +of operations over a value by setting specific fields in its metatable. +For instance, when a non-numeric value is the operand of an addition, +Lua checks for a function in the field "__add" in its metatable. +If it finds one, +Lua calls this function to perform the addition. + +

We call the keys in a metatable events +and the values metamethods. +In the previous example, the event is "add" +and the metamethod is the function that performs the addition. + +

You can query the metatable of any value +through the getmetatable function. + +

You can replace the metatable of tables +through the setmetatable +function. +You cannot change the metatable of other types from Lua +(except using the debug library); +you must use the C API for that. + +

Tables and userdata have individual metatables
+(although multiple tables and userdata can share
+the same table as their metatable);
+values of all other types share one single metatable per type.
+So, there is one single metatable for all numbers,
+another for all strings, etc.
+

A metatable may control how an object behaves in arithmetic operations, +order comparisons, concatenation, length operation, and indexing. +A metatable can also define a function to be called when a userdata +is garbage collected. +For each of these operations Lua associates a specific key +called an event. +When Lua performs one of these operations over a value, +it checks whether this value has a metatable with the corresponding event. +If so, the value associated with that key (the metamethod) +controls how Lua will perform the operation. + +

Metatables control the operations listed next. +Each operation is identified by its corresponding name. +The key for each operation is a string with its name prefixed by +two underscores, `__´; +for instance, the key for operation "add" is the +string "__add". +The semantics of these operations is better explained by a Lua function +describing how the interpreter executes the operation. + +

The code shown here in Lua is only illustrative; +the real behavior is hard coded in the interpreter +and it is much more efficient than this simulation. +All functions used in these descriptions +(rawget, tonumber, etc.) +are described in 5.1. +In particular, to retrieve the metamethod of a given object, +we use the expression +

+       metatable(obj)[event]
+
+This should be read as +
+       rawget(getmetatable(obj) or {}, event)
+
+That is, the access to a metamethod does not invoke other metamethods, +and the access to objects with no metatables does not fail +(it simply results in nil). + +
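+As an illustrative sketch, the "add" event can give addition a meaning
+for tables; the 2-D vector shape used here is purely hypothetical:
+
+       local mt = {}
+       mt.__add = function (a, b)
+         return setmetatable({ x = a.x + b.x, y = a.y + b.y }, mt)
+       end
+       local v1 = setmetatable({ x = 1, y = 2 }, mt)
+       local v2 = setmetatable({ x = 3, y = 4 }, mt)
+       local v3 = v1 + v2       -- Lua finds the "add" metamethod and calls it
+       print(v3.x, v3.y)        --> 4       6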

+ +

2.9 - Environments

+ +

Besides metatables, +objects of types thread, function, and userdata +have another table associated with them, +called their environment. +Like metatables, environments are regular tables and +multiple objects can share the same environment. + +

Environments associated with userdata have no meaning for Lua. +It is only a feature for programmers to associate a table to +a userdata. + +

Environments associated with threads are called +global environments. +They are used as the default environment for threads and +non-nested functions created by the thread +(through loadfile, loadstring or load) +and can be directly accessed by C code (see 3.3). + +

Environments associated with C functions can be directly +accessed by C code (see 3.3). +They are used as the default environment for other C functions +created by the function. + +

Environments associated with Lua functions are used to resolve +all accesses to global variables within the function (see 2.3). +They are used as the default environment for other Lua functions +created by the function. + +

You can change the environment of a Lua function or the +running thread by calling setfenv. +You can get the environment of a Lua function or the running thread +by calling getfenv. +To manipulate the environment of other objects +(userdata, C functions, other threads) you must +use the C API. + +
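+A small illustrative sketch (it assumes no global x has been defined):
+
+       local function f () return x end
+       print(f())                --> nil
+       setfenv(f, { x = 42 })    -- give f its own environment
+       print(f())                --> 42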

2.10 - Garbage Collection

+ +

Lua performs automatic memory management.
+This means that
+you have to worry neither about allocating memory for new objects
+nor about freeing it when the objects are no longer needed.
+Lua manages memory automatically by running
+a garbage collector from time to time
+to collect all dead objects
+(that is, objects that are no longer accessible from Lua).
+All objects in Lua are subject to automatic management:
+tables, userdata, functions, threads, and strings.
+

Lua implements an incremental mark-and-sweep collector. +It uses two numbers to control its garbage-collection cycles: +the garbage-collector pause and +the garbage-collector step multiplier. + +

The garbage-collector pause +controls how long the collector waits before starting a new cycle. +Larger values make the collector less aggressive. +Values smaller than 1 mean the collector will not wait to +start a new cycle. +A value of 2 means that the collector waits for the total memory in use +to double before starting a new cycle. + +

The step multiplier
+controls the speed of the collector relative to
+memory allocation.
+Larger values make the collector more aggressive but also increase
+the size of each incremental step.
+Values smaller than 1 make the collector too slow and
+may result in the collector never finishing a cycle.
+The default, 2, means that the collector runs at "twice"
+the speed of memory allocation.
+

You can change these numbers by calling lua_gc in C +or collectgarbage in Lua. +Both get as arguments percentage points +(so an argument 100 means a real value of 1). +With these functions you can also control +the collector directly (e.g., stop and restart it). + +
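+From Lua, an illustrative sketch of these controls:
+
+       collectgarbage("setpause", 200)     -- pause of 2: wait until memory doubles
+       collectgarbage("setstepmul", 200)   -- step multiplier of 2
+       collectgarbage("collect")           -- force a complete collection cycle
+       print(collectgarbage("count"))      -- total memory in use, in Kbytes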

2.10.1 - Garbage-Collection Metamethods

+ +

Using the C API, +you can set garbage-collector metamethods for userdata (see 2.8). +These metamethods are also called finalizers. +Finalizers allow you to coordinate Lua's garbage collection +with external resource management +(such as closing files, network or database connections, +or freeing your own memory). + +

Garbage userdata with a field __gc in their metatables are not +collected immediately by the garbage collector. +Instead, Lua puts them in a list. +After the collection, +Lua does the equivalent of the following function +for each userdata in that list: +

+ function gc_event (udata)
+   local h = metatable(udata).__gc
+   if h then
+     h(udata)
+   end
+ end
+
+ +

At the end of each garbage-collection cycle, +the finalizers for userdata are called in reverse +order of their creation, +among those collected in that cycle. +That is, the first finalizer to be called is the one associated +with the userdata created last in the program. + +

2.10.2 - Weak Tables

+ +

A weak table is a table whose elements are +weak references. +A weak reference is ignored by the garbage collector. +In other words, +if the only references to an object are weak references, +then the garbage collector will collect this object. + +

A weak table can have weak keys, weak values, or both. +A table with weak keys allows the collection of its keys, +but prevents the collection of its values. +A table with both weak keys and weak values allows the collection of +both keys and values. +In any case, if either the key or the value is collected, +the whole pair is removed from the table. +The weakness of a table is controlled by the value of the +__mode field of its metatable. +If the __mode field is a string containing the character `k´, +the keys in the table are weak. +If __mode contains `v´, +the values in the table are weak. + +

After you use a table as a metatable, +you should not change the value of its field __mode. +Otherwise, the weak behavior of the tables controlled by this +metatable is undefined. + +
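+For instance, a table with weak keys is the usual shape of a cache
+indexed by objects:
+
+       local cache = setmetatable({}, { __mode = "k" })
+       do
+         local key = {}
+         cache[key] = "expensive result"
+       end
+       -- once nothing else references the key, a collection cycle
+       -- may remove the whole pair from the cache
+       collectgarbage()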

2.11 - Coroutines

+ +

Lua supports coroutines, +also called collaborative multithreading. +A coroutine in Lua represents an independent thread of execution. +Unlike threads in multithread systems, however, +a coroutine only suspends its execution by explicitly calling +a yield function. + +

You create a coroutine with a call to coroutine.create. +Its sole argument is a function +that is the main function of the coroutine. +The create function only creates a new coroutine and +returns a handle to it (an object of type thread); +it does not start the coroutine execution. + +

When you first call coroutine.resume, +passing as its first argument +the thread returned by coroutine.create, +the coroutine starts its execution, +at the first line of its main function. +Extra arguments passed to coroutine.resume are passed on +to the coroutine main function. +After the coroutine starts running, +it runs until it terminates or yields. + +

A coroutine can terminate its execution in two ways: +Normally, when its main function returns +(explicitly or implicitly, after the last instruction); +and abnormally, if there is an unprotected error. +In the first case, coroutine.resume returns true, +plus any values returned by the coroutine main function. +In case of errors, coroutine.resume returns false +plus an error message. + +

A coroutine yields by calling coroutine.yield. +When a coroutine yields, +the corresponding coroutine.resume returns immediately, +even if the yield happens inside nested function calls +(that is, not in the main function, +but in a function directly or indirectly called by the main function). +In the case of a yield, coroutine.resume also returns true, +plus any values passed to coroutine.yield. +The next time you resume the same coroutine, +it continues its execution from the point where it yielded, +with the call to coroutine.yield returning any extra +arguments passed to coroutine.resume. + +

The coroutine.wrap function creates a coroutine, +just like coroutine.create, +but instead of returning the coroutine itself, +it returns a function that, when called, resumes the coroutine. +Any arguments passed to this function +go as extra arguments to coroutine.resume. +coroutine.wrap returns all the values returned by coroutine.resume, +except the first one (the boolean error code). +Unlike coroutine.resume, +coroutine.wrap does not catch errors; +any error is propagated to the caller. + +

As an example, +consider the next code: +

+function foo (a)
+  print("foo", a)
+  return coroutine.yield(2*a)
+end
+
+co = coroutine.create(function (a,b)
+      print("co-body", a, b)
+      local r = foo(a+1)
+      print("co-body", r)
+      local r, s = coroutine.yield(a+b, a-b)
+      print("co-body", r, s)
+      return b, "end"
+end)
+       
+print("main", coroutine.resume(co, 1, 10))
+print("main", coroutine.resume(co, "r"))
+print("main", coroutine.resume(co, "x", "y"))
+print("main", coroutine.resume(co, "x", "y"))
+
+When you run it, it produces the following output: +
+co-body 1       10
+foo     2
+main    true    4
+co-body r
+main    true    11      -9
+co-body x       y
+main    true    10      end
+main    false   cannot resume dead coroutine
+
+ +

+

3 - The Application Program Interface

+ + +

This section describes the C API for Lua, that is, +the set of C functions available to the host program to communicate +with Lua. +All API functions and related types and constants +are declared in the header file lua.h. + +

Even when we use the term "function",
+any facility in the API may be provided as a macro instead.
+All such macros use each of their arguments exactly once
+(except for the first argument, which is always a Lua state),
+and so do not generate any hidden side-effects.
+

As in most C libraries, +the Lua API functions do not check their arguments for validity or consistency. +However, you can change this behavior by compiling Lua +with a proper definition for the macro luai_apicheck, +in file luaconf.h. + +

3.1 - The Stack

+ +

Lua uses a virtual stack to pass values to and from C. +Each element in this stack represents a Lua value +(nil, number, string, etc.). + +

Whenever Lua calls C, the called function gets a new stack, +which is independent of previous stacks and of stacks of +C functions that are still active. +This stack initially contains any arguments to the C function +and it is where the C function pushes its results +to be returned to the caller (see lua_CFunction). + +

For convenience, +most query operations in the API do not follow a strict stack discipline. +Instead, they can refer to any element in the stack +by using an index: +A positive index represents an absolute stack position +(starting at 1); +a negative index represents an offset relative to the top of the stack. +More specifically, if the stack has n elements, +then index 1 represents the first element +(that is, the element that was pushed onto the stack first) +and +index n represents the last element; +index -1 also represents the last element +(that is, the element at the top) +and index -n represents the first element. +We say that an index is valid +if it lies between 1 and the stack top +(that is, if 1 <= abs(index) <= top). + + +

3.2 - Stack Size

+ +

When you interact with the Lua API,
+you are responsible for ensuring consistency.
+In particular,
+you are responsible for controlling stack overflow.
+You can use the function lua_checkstack
+to grow the stack size.
+

Whenever Lua calls C, +it ensures that at least LUA_MINSTACK stack positions are available. +LUA_MINSTACK is defined as 20, +so that usually you do not have to worry about stack space +unless your code has loops pushing elements onto the stack. + +

Most query functions accept as indices any value inside the +available stack space, that is, indices up to the maximum stack size +you have set through lua_checkstack. +Such indices are called acceptable indices. +More formally, we define an acceptable index +as follows: +

+       (index < 0 && abs(index) <= top) || (index > 0 && index <= stackspace)
+
+Note that 0 is never an acceptable index. + +

3.3 - Pseudo-Indices

+ +

Unless otherwise noted, +any function that accepts valid indices can also be called with +pseudo-indices, +which represent some Lua values that are accessible to C code +but which are not in the stack. +Pseudo-indices are used to access the thread environment, +the function environment, +the registry, +and the upvalues of a C function (see 3.4). + +

The thread environment (where global variables live) is +always at pseudo-index LUA_GLOBALSINDEX. +The environment of the running C function is always +at pseudo-index LUA_ENVIRONINDEX. + +

To access and change the value of global variables, +you can use regular table operations over an environment table. +For instance, to access the value of a global variable, do +

+       lua_getfield(L, LUA_GLOBALSINDEX, varname);
+
+ +

3.4 - C Closures

+ +

When a C function is created, +it is possible to associate some values with it, +thus creating a C closure; +these values are called upvalues and are +accessible to the function whenever it is called +(see lua_pushcclosure). + +

Whenever a C function is called, +its upvalues are located at specific pseudo-indices. +These pseudo-indices are produced by the macro +lua_upvalueindex. +The first value associated with a function is at position +lua_upvalueindex(1), and so on. +Any access to lua_upvalueindex(n), +where n is greater than the number of upvalues of the +current function, +produces an acceptable (but invalid) index. + +

3.5 - Registry

+ +

Lua provides a registry, +a pre-defined table that can be used by any C code to +store whatever Lua value it needs to store. +This table is always located at pseudo-index +LUA_REGISTRYINDEX. +Any C library can store data into this table, +but it should take care to choose keys different from those used +by other libraries, to avoid collisions. +Typically, you should use as key a string containing your library name +or a light userdata with the address of a C object in your code. + +

The integer keys in the registry are used by the reference mechanism, +implemented by the auxiliary library, +and therefore should not be used for other purposes. + +

3.6 - Error Handling in C

+ +

Internally, Lua uses the C longjmp facility to handle errors. +(You can also choose to use exceptions if you use C++; +See file luaconf.h.) +When Lua faces any error +(such as memory allocation errors, type errors, syntax errors, +and runtime errors) +it raises an error; +that is, it does a long jump. +A protected environment uses setjmp +to set a recover point; +any error jumps to the most recent active recover point. + +

Almost any function in the API may raise an error, +for instance due to a memory allocation error. +The following functions run in protected mode +(that is, they create a protected environment to run), +so they never raise an error: +lua_newstate, lua_close, lua_load, +lua_pcall, and lua_cpcall. + +

Inside a C function you can raise an error by calling lua_error. + +

3.7 - Functions and Types

+ +

Here we list all functions and types from the C API in +alphabetical order. + +

+


lua_Alloc

+
+          typedef void * (*lua_Alloc) (void *ud,
+                                       void *ptr,
+                                       size_t osize,
+                                       size_t nsize);
+
+
+ + +

The type of the memory allocation function used by Lua states. +The allocator function must provide a +functionality similar to realloc, +but not exactly the same. +Its arguments are +ud, an opaque pointer passed to lua_newstate; +ptr, a pointer to the block being allocated/reallocated/freed; +osize, the original size of the block; +nsize, the new size of the block. +ptr is NULL if and only if osize is zero. +When nsize is zero, the allocator must return NULL; +if osize is not zero, +it should free the block pointed to by ptr. +When nsize is not zero, the allocator returns NULL +if and only if it cannot fill the request. +When nsize is not zero and osize is zero, +the allocator should behave like malloc. +When nsize and osize are not zero, +the allocator behaves like realloc. +Lua assumes that the allocator never fails when +osize >= nsize. + +

Here is a simple implementation for the allocator function. +It is used in the auxiliary library by lua_newstate. +

+static void *l_alloc (void *ud, void *ptr, size_t osize, size_t nsize) {
+  (void)ud;     /* not used */
+  (void)osize;  /* not used */
+  if (nsize == 0) {
+    free(ptr);  /* ANSI requires that free(NULL) has no effect */
+    return NULL;
+  }
+  else
+    /* ANSI requires that realloc(NULL, size) == malloc(size) */
+    return realloc(ptr, nsize);
+}
+
+ +

+


lua_atpanic

+
+          lua_CFunction lua_atpanic (lua_State *L, lua_CFunction panicf);
+
+ + +

Sets a new panic function and returns the old one. + +

If an error happens outside any protected environment, +Lua calls a panic function +and then calls exit(EXIT_FAILURE), +thus exiting the host application. +Your panic function may avoid this exit by +never returning (e.g., doing a long jump). + +

The panic function can access the error message at the top of the stack. + +
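+
+As a sketch (the name my_panic is ours), a panic function that at least
+reports the error before the application exits:
+
+       static int my_panic (lua_State *L) {
+         /* the error message is at the top of the stack */
+         fprintf(stderr, "unprotected Lua error: %s\n", lua_tostring(L, -1));
+         return 0;   /* returning lets Lua call exit(EXIT_FAILURE) */
+       }
+
+       /* installation */
+       lua_atpanic(L, my_panic);
+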

+


lua_call

+
+          void lua_call (lua_State *L, int nargs, int nresults);
+
+ + +

Calls a function. + +

To call a function you must use the following protocol: +First, the function to be called is pushed onto the stack; +then, the arguments to the function are pushed +in direct order; +that is, the first argument is pushed first. +Finally you call lua_call; +nargs is the number of arguments that you pushed onto the stack. +All arguments and the function value are popped from the stack +when the function is called. +The function results are pushed onto the stack when the function returns. +The number of results is adjusted to nresults, +unless nresults is LUA_MULTRET. +In this case, all results from the function are pushed. +Lua takes care that the returned values fit into the stack space. +The function results are pushed onto the stack in direct order +(the first result is pushed first), +so that after the call the last result is on the top of the stack. + +

Any error inside the called function is propagated upwards +(with a longjmp). + +

The following example shows how the host program may do the +equivalent to this Lua code: +

+       a = f("how", t.x, 14)
+
+Here it is in C: +
+    lua_getfield(L, LUA_GLOBALSINDEX, "f");          /* function to be called */
+    lua_pushstring(L, "how");                                 /* 1st argument */
+    lua_getfield(L, LUA_GLOBALSINDEX, "t");            /* table to be indexed */
+    lua_getfield(L, -1, "x");                 /* push result of t.x (2nd arg) */
+    lua_remove(L, -2);                           /* remove `t' from the stack */
+    lua_pushinteger(L, 14);                                   /* 3rd argument */
+    lua_call(L, 3, 1);         /* call function with 3 arguments and 1 result */
+    lua_setfield(L, LUA_GLOBALSINDEX, "a");        /* set global variable `a' */
+
+Note that the code above is "balanced": +at its end, the stack is back to its original configuration. +This is considered good programming practice. + +

+


lua_CFunction

+
+          typedef int (*lua_CFunction) (lua_State *L);
+
+ + +

Type for C functions. + +

In order to communicate properly with Lua, +a C function must use the following protocol, +which defines the way parameters and results are passed: +A C function receives its arguments from Lua in its stack +in direct order (the first argument is pushed first). +So, when the function starts, +lua_gettop(L) returns the number of arguments received by the function. +The first argument (if any) is at index 1 +and its last argument is at index lua_gettop(L). +To return values to Lua, a C function just pushes them onto the stack, +in direct order (the first result is pushed first), +and returns the number of results. +Any other value in the stack below the results will be properly +discarded by Lua. +Like a Lua function, a C function called by Lua can also return +many results. + +

As an example, the following function receives a variable number +of numerical arguments and returns their average and sum: +

+       static int foo (lua_State *L) {
+         int n = lua_gettop(L);    /* number of arguments */
+         lua_Number sum = 0;
+         int i;
+         for (i = 1; i <= n; i++) {
+           if (!lua_isnumber(L, i)) {
+             lua_pushstring(L, "incorrect argument to function `average'");
+             lua_error(L);
+           }
+           sum += lua_tonumber(L, i);
+         }
+         lua_pushnumber(L, sum/n);        /* first result */
+         lua_pushnumber(L, sum);         /* second result */
+         return 2;                   /* number of results */
+       }
+
+ +

+


lua_checkstack

+
+          int lua_checkstack (lua_State *L, int extra);
+
+ + +

Ensures that there are at least extra free stack slots in the stack. +It returns false if it cannot grow the stack to that size. +This function never shrinks the stack; +if the stack is already larger than the new size, +it is left unchanged. + +

+


lua_close

+
+          void lua_close (lua_State *L);
+
+ + +

Destroys all objects in the given Lua state +(calling the corresponding garbage-collection metamethods, if any) +and frees all dynamic memory used by this state. +On several platforms, you may not need to call this function, +because all resources are naturally released when the host program ends. +On the other hand, long-running programs, +such as a daemon or a web server, +might need to release states as soon as they are not needed, +to avoid growing too large. + +

+


lua_concat

+
+          void lua_concat (lua_State *L, int n);
+
+ + +

Concatenates the n values at the top of the stack, +pops them, and leaves the result at the top. +If n is 1, the result is the single string on the stack +(that is, the function does nothing); +if n is 0, the result is the empty string. +Concatenation is done following the usual semantics of Lua +(see 2.5.4). + +

+


lua_cpcall

+
+          int lua_cpcall (lua_State *L, lua_CFunction func, void *ud);
+
+ + +

Calls the C function func in protected mode. +func starts with only one element in its stack, +a light userdata containing ud. +In case of errors, +lua_cpcall returns the same error codes as lua_pcall, +plus the error object on the top of the stack; +otherwise, it returns zero, and does not change the stack. +All values returned by func are discarded. + +
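+
+A sketch of the usual idiom (the name protected_init and the choice of
+luaL_openlibs as the protected work are ours): wrap setup code in a
+C function and run it through lua_cpcall so that any error is caught.
+
+       static int protected_init (lua_State *L) {
+         void *ud = lua_touserdata(L, 1);   /* the pointer passed to lua_cpcall */
+         (void)ud;                          /* unused in this sketch */
+         /* code here may raise Lua errors freely */
+         luaL_openlibs(L);
+         return 0;                          /* returned values are discarded */
+       }
+
+       /* in the host program: */
+       if (lua_cpcall(L, protected_init, NULL) != 0)
+         fprintf(stderr, "error: %s\n", lua_tostring(L, -1));
+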

+


lua_createtable

+
+          void lua_createtable (lua_State *L, int narr, int nrec);
+
+ + +

Creates a new empty table and pushes it onto the stack. +The new table has space pre-allocated +for narr array elements and nrec non-array elements. +This pre-allocation is useful when you know exactly how many elements +the table will have. +Otherwise you can use the function lua_newtable. + +
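+
+For example (a sketch; the name push_squares is ours), building an
+array-like table whose size is known in advance:
+
+       static void push_squares (lua_State *L, int n) {
+         int i;
+         lua_createtable(L, n, 0);    /* pre-allocate n array slots */
+         for (i = 1; i <= n; i++) {
+           lua_pushnumber(L, (lua_Number)i * i);
+           lua_rawseti(L, -2, i);     /* t[i] = i*i; pops the number */
+         }
+         /* the new table is left on the top of the stack */
+       }
+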

+


lua_dump

+
+          int lua_dump (lua_State *L, lua_Writer writer, void *data);
+
+ + +

Dumps a function as a binary chunk. +Receives a Lua function on the top of the stack +and produces a binary chunk that, +if loaded again, +results in a function equivalent to the one dumped. +As it produces parts of the chunk, +lua_dump calls function writer (see lua_Writer) +with the given data +to write them. + +

The value returned is the error code returned by the last +call to the writer; +0 means no errors. + +

This function does not pop the Lua function from the stack. + +

+


lua_equal

+
+          int lua_equal (lua_State *L, int index1, int index2);
+
+ + +

Returns 1 if the two values in acceptable indices index1 and +index2 are equal, +following the semantics of the Lua == operator +(that is, may call metamethods). +Otherwise returns 0. +Also returns 0 if any of the indices is non valid. + +

+


lua_error

+
+          int lua_error (lua_State *L);
+
+ + +

Generates a Lua error. +The error message (which can actually be a Lua value of any type) +must be on the stack top. +This function does a long jump, +and therefore never returns. +(see luaL_error). + +

+


lua_gc

+
+          int lua_gc (lua_State *L, int what, int data);
+
+ + +

Controls the garbage collector. + +

This function performs several tasks, +according to the value of the parameter what: +

+
+  LUA_GCSTOP: stops the garbage collector.
+  LUA_GCRESTART: restarts the garbage collector.
+  LUA_GCCOLLECT: performs a full garbage-collection cycle.
+  LUA_GCCOUNT: returns the current amount of memory (in Kbytes) in use by Lua.
+  LUA_GCCOUNTB: returns the remainder of dividing the current amount of
+  bytes of memory in use by Lua by 1024.
+  LUA_GCSTEP: performs an incremental step of garbage collection.
+  The step "size" is controlled by data (larger values mean more steps)
+  in a non-specified way.
+  The function returns 1 if the step finished a garbage-collection cycle.
+  LUA_GCSETPAUSE: sets data as the new value for the pause of the collector
+  (see 2.10). The function returns the previous value of the pause.
+  LUA_GCSETSTEPMUL: sets data as the new value for the step multiplier of
+  the collector (see 2.10).
+  The function returns the previous value of the step multiplier.
+

+


lua_getallocf

+
+          lua_Alloc lua_getallocf (lua_State *L, void **ud);
+
+ + +

Returns the memory allocator function of a given state. +If ud is not NULL, Lua stores in *ud the +opaque pointer passed to lua_newstate. + +

+


lua_getfenv

+
+          void lua_getfenv (lua_State *L, int index);
+
+ + +

Pushes on the stack the environment table of +the value at the given index. + +

+


lua_getfield

+
+          void lua_getfield (lua_State *L, int index, const char *k);
+
+ + +

Pushes onto the stack the value t[k], +where t is the value at the given valid index index. +As in Lua, this function may trigger a metamethod +for the "index" event (see 2.8). + +

+


lua_getglobal

+
+          void lua_getglobal (lua_State *L, const char *name);
+
+ + +

Pushes onto the stack the value of the global name. +It is defined as a macro: +

+#define lua_getglobal(L,s)  lua_getfield(L, LUA_GLOBALSINDEX, s)
+
+ +

+


lua_getmetatable

+
+          int lua_getmetatable (lua_State *L, int index);
+
+ + +

Pushes onto the stack the metatable of the value at the given +acceptable index. +If the index is not valid, +or if the value does not have a metatable, +the function returns 0 and pushes nothing on the stack. + +

+


lua_gettable

+
+          void lua_gettable (lua_State *L, int index);
+
+ + +

Pushes onto the stack the value t[k], +where t is the value at the given valid index index +and k is the value at the top of the stack. + +

This function pops the key from the stack +(putting the resulting value in its place). +As in Lua, this function may trigger a metamethod +for the "index" event (see 2.8). + +

+


lua_gettop

+
+          int lua_gettop (lua_State *L);
+
+ + +

Returns the index of the top element in the stack. +Because indices start at 1, +this result is equal to the number of elements in the stack +(and so 0 means an empty stack). + +

+


lua_insert

+
+          void lua_insert (lua_State *L, int index);
+
+ + +

Moves the top element into the given valid index, +shifting up the elements above this index to open space. +Cannot be called with a pseudo-index, +because a pseudo-index is not an actual stack position. + +

+


lua_Integer

+
+          typedef ptrdiff_t lua_Integer;
+
+ + +

The type used by the Lua API to represent integral values. + +

By default it is a ptrdiff_t, +which is usually the largest integral type the machine handles +"comfortably". + +

+


lua_isboolean

+
+          int lua_isboolean (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index has type boolean, +and 0 otherwise. + +

+


lua_iscfunction

+
+          int lua_iscfunction (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a C function, +and 0 otherwise. + +

+


lua_isfunction

+
+          int lua_isfunction (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a function +(either C or Lua), and 0 otherwise. + +

+


lua_islightuserdata

+
+          int lua_islightuserdata (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a light userdata, +and 0 otherwise. + +

+


lua_isnil

+
+          int lua_isnil (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is nil, +and 0 otherwise. + +

+


lua_isnumber

+
+          int lua_isnumber (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a number +or a string convertible to a number, +and 0 otherwise. + +

+


lua_isstring

+
+          int lua_isstring (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a string +or a number (which is always convertible to a string), +and 0 otherwise. + +

+


lua_istable

+
+          int lua_istable (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a table, +and 0 otherwise. + +

+


lua_isthread

+
+          int lua_isthread (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a thread, +and 0 otherwise. + +

+


lua_isuserdata

+
+          int lua_isuserdata (lua_State *L, int index);
+
+ + +

Returns 1 if the value at the given acceptable index is a userdata +(either full or light), and 0 otherwise. + +

+


lua_lessthan

+
+          int lua_lessthan (lua_State *L, int index1, int index2);
+
+ + +

Returns 1 if the value at acceptable index index1 is smaller +than the value at acceptable index index2, +following the semantics of the Lua < operator +(that is, may call metamethods). +Otherwise returns 0. +Also returns 0 if any of the indices is non valid. + +

+


lua_load

+
+          int lua_load (lua_State *L, lua_Reader reader, void *data,
+                                      const char *chunkname);
+
+
+ + +

Loads a Lua chunk. +If there are no errors, +lua_load pushes the compiled chunk as a Lua +function on top of the stack. +Otherwise, it pushes an error message. +The return values of lua_load are: +

+
+  0: no errors;
+  LUA_ERRSYNTAX: syntax error during pre-compilation;
+  LUA_ERRMEM: memory allocation error.
+

lua_load automatically detects whether the chunk is text or binary, +and loads it accordingly (see program luac). + +

lua_load uses a user-supplied reader function to read the chunk +(see lua_Reader). +The data argument is an opaque value passed to the reader function. + +

The chunkname argument gives a name to the chunk, +which is used for error messages and in debug information (see 3.8). + +

+


lua_newstate

+
+          lua_State *lua_newstate (lua_Alloc f, void *ud);
+
+ + +

Creates a new, independent state. +Returns NULL if cannot create the state +(due to lack of memory). +The argument f is the allocator function; +Lua does all memory allocation for this state through this function. +The second argument, ud, is an opaque pointer that Lua +simply passes to the allocator in every call. + +

+


lua_newtable

+
+          void lua_newtable (lua_State *L);
+
+ + +

Creates a new empty table and pushes it onto the stack. +Equivalent to lua_createtable(L, 0, 0). + +

+


lua_newthread

+
+          lua_State *lua_newthread (lua_State *L);
+
+ + +

Creates a new thread, pushes it on the stack, +and returns a pointer to a lua_State that represents this new thread. +The new state returned by this function shares with the original state +all global objects (such as tables), +but has an independent execution stack. + +

There is no explicit function to close or to destroy a thread. +Threads are subject to garbage collection, +like any Lua object. + +

+


lua_newuserdata

+
+          void *lua_newuserdata (lua_State *L, size_t size);
+
+ + +

This function allocates a new block of memory with the given size, +pushes on the stack a new full userdata with the block address, +and returns this address. + +

Userdata represents C values in Lua. +A full userdata represents a block of memory. +It is an object (like a table): +You must create it, it can have its own metatable, +and you can detect when it is being collected. +A full userdata is only equal to itself (under raw equality). + +

When Lua collects a full userdata with a gc metamethod, +Lua calls the metamethod and marks the userdata as finalized. +When this userdata is collected again then +Lua frees its corresponding memory. + +

+


lua_next

+
+          int lua_next (lua_State *L, int index);
+
+ + +

Pops a key from the stack, +and pushes a key-value pair from the table at the given index +(the "next" pair after the given key). +If there are no more elements in the table, +then lua_next returns 0 (and pushes nothing). + +

A typical traversal looks like this: +

+       /* table is in the stack at index `t' */
+       lua_pushnil(L);  /* first key */
+       while (lua_next(L, t) != 0) {
+         /* `key' is at index -2 and `value' at index -1 */
+         printf("%s - %s\n",
+           lua_typename(L, lua_type(L, -2)), lua_typename(L, lua_type(L, -1)));
+         lua_pop(L, 1);  /* removes `value'; keeps `key' for next iteration */
+       }
+
+ +

While traversing a table, +do not call lua_tolstring directly on a key, +unless you know that the key is actually a string. +Recall that lua_tolstring changes +the value at the given index; +this confuses the next call to lua_next. + +

+


lua_Number

+
+          typedef double lua_Number;
+
+ + +

The type of numbers in Lua. +By default, it is double, but that can be changed in luaconf.h. + +

Through the configuration file you can change +Lua to operate with another type for numbers (e.g., float or long). + +

+


lua_objlen

+
+          size_t lua_objlen (lua_State *L, int index);
+
+ + +

Returns the "length" of the value at the given acceptable index: +for strings, this is the string length; +for tables, this is the result of the length operator (`#´); +for userdata, this is the size of the block of memory allocated +for the userdata; +for other values, it is 0. + +

+


lua_pcall

+
+          int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc);
+
+ + +

Calls a function in protected mode. + +

Both nargs and nresults have the same meaning as +in lua_call. +If there are no errors during the call, +lua_pcall behaves exactly like lua_call. +However, if there is any error, +lua_pcall catches it, +pushes a single value on the stack (the error message), +and returns an error code. +Like lua_call, +lua_pcall always removes the function +and its arguments from the stack. + +

If errfunc is 0, +then the error message returned on the stack +is exactly the original error message. +Otherwise, errfunc is the stack index of an +error handler function. +(In the current implementation, this index cannot be a pseudo-index.) +In case of runtime errors, +this function will be called with the error message +and its return value will be the message returned on the stack by lua_pcall. + +

Typically, the error handler function is used to add more debug +information to the error message, such as a stack traceback. +Such information cannot be gathered after the return of lua_pcall, +since by then the stack has unwound. + +

The lua_pcall function returns 0 in case of success +or one of the following error codes +(defined in lua.h): +

+
+  LUA_ERRRUN: a runtime error.
+  LUA_ERRMEM: memory allocation error.
+  For such errors, Lua does not call the error handler function.
+  LUA_ERRERR: error while running the error handler function.
+
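+
+As an illustration (not in the original text; it assumes a global Lua
+function f that takes and returns one number), a protected call that
+retrieves the error message on failure:
+
+       lua_getfield(L, LUA_GLOBALSINDEX, "f");   /* function to be called */
+       lua_pushnumber(L, 1);                     /* 1st argument */
+       if (lua_pcall(L, 1, 1, 0) != 0) {         /* 1 arg, 1 result, no handler */
+         fprintf(stderr, "error running f: %s\n", lua_tostring(L, -1));
+         lua_pop(L, 1);                          /* remove the error message */
+       }
+       else {
+         printf("f returned %g\n", lua_tonumber(L, -1));
+         lua_pop(L, 1);                          /* remove the result */
+       }
+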

+


lua_pop

+
+          void lua_pop (lua_State *L, int n);
+
+ + +

Pops n elements from the stack. + +

+


lua_pushboolean

+
+          void lua_pushboolean (lua_State *L, int b);
+
+ + +

Pushes a boolean value with value b onto the stack. + +

+


lua_pushcclosure

+
+          void lua_pushcclosure (lua_State *L, lua_CFunction fn, int n);
+
+ + +

Pushes a new C closure onto the stack. + +

When a C function is created, +it is possible to associate some values with it, +thus creating a C closure (see 3.4); +these values are then accessible to the function whenever it is called. +To associate values with a C function, +first these values should be pushed onto the stack +(when there are multiple values, the first value is pushed first). +Then lua_pushcclosure +is called to create and push the C function onto the stack, +with the argument n telling how many values should be +associated with the function. +lua_pushcclosure also pops these values from the stack. + +

+


lua_pushcfunction

+
+          void lua_pushcfunction (lua_State *L, lua_CFunction f);
+
+ + +

Pushes a C function onto the stack. +This function receives a pointer to a C function +and pushes on the stack a Lua value of type function that, +when called, invokes the corresponding C function. + +

Any function to be registered in Lua must +follow the correct protocol to receive its parameters +and return its results (see lua_CFunction). + +

The call lua_pushcfunction(L, f) is equivalent to +lua_pushcclosure(L, f, 0). + +

+


lua_pushfstring

+
+          const char *lua_pushfstring (lua_State *L, const char *fmt, ...);
+
+ + +

Pushes onto the stack a formatted string +and returns a pointer to this string. +It is similar to the C function sprintf, +but has some important differences: +

+
+  It does not need to allocate space for the result;
+  the result is a Lua string and Lua takes care of memory allocation
+  (and deallocation, through garbage collection).
+  The conversion specifiers are quite restricted.
+  There are no flags, widths, or precisions.
+  The conversion specifiers can only be
+  `%%´ (inserts a `%´ in the string),
+  `%s´ (inserts a zero-terminated string, with no size restrictions),
+  `%f´ (inserts a lua_Number),
+  `%p´ (inserts a pointer as a hexadecimal numeral),
+  `%d´ (inserts an int),
+  and `%c´ (inserts an int as a character).
+

+


lua_pushinteger

+
+          void lua_pushinteger (lua_State *L, lua_Integer n);
+
+ + +

Pushes a number with value n onto the stack. + +

+


lua_pushlightuserdata

+
+          void lua_pushlightuserdata (lua_State *L, void *p);
+
+ + +

Pushes a light userdata onto the stack. + +

Userdata represents C values in Lua. +A light userdata represents a pointer. +It is a value (like a number): +You do not create it, it has no metatables, +it is not collected (as it was never created). +A light userdata is equal to "any" +light userdata with the same C address. + +

+


lua_pushlstring

+
+          void lua_pushlstring (lua_State *L, const char *s, size_t len);
+
+ + +

Pushes the string pointed to by s with size len +onto the stack. +Lua makes (or reuses) an internal copy of the given string, +so the memory at s can be freed or reused immediately after +the function returns. +The string can contain embedded zeros. + +

+


lua_pushnil

+
+          void lua_pushnil (lua_State *L);
+
+ + +

Pushes a nil value onto the stack. + +

+


lua_pushnumber

+
+          void lua_pushnumber (lua_State *L, lua_Number n);
+
+ + +

Pushes a number with value n onto the stack. + +

+


lua_pushstring

+
+          void lua_pushstring (lua_State *L, const char *s);
+
+ + +

Pushes the zero-terminated string pointed to by s +onto the stack. +Lua makes (or reuses) an internal copy of the given string, +so the memory at s can be freed or reused immediately after +the function returns. +The string cannot contain embedded zeros; +it is assumed to end at the first zero. + +

+


lua_pushthread

+
+          int lua_pushthread (lua_State *L);
+
+ + +

Pushes the thread represented by L onto the stack.
+Returns 1 if this thread is the main thread of its state.


+


lua_pushvalue

+
+          void lua_pushvalue (lua_State *L, int index);
+
+ + +

Pushes a copy of the element at the given valid index +onto the stack. + +

+


lua_pushvfstring

+
+          const char *lua_pushvfstring (lua_State *L, const char *fmt, va_list argp);
+
+ + +

Equivalent to lua_pushfstring, except that it receives a va_list +instead of a variable number of arguments. + +

+


lua_rawequal

+
+          int lua_rawequal (lua_State *L, int index1, int index2);
+
+ + +

Returns 1 if the two values in acceptable indices index1 and +index2 are primitively equal +(that is, without calling metamethods). +Otherwise returns 0. +Also returns 0 if any of the indices are non valid. + +

+


lua_rawget

+
+          void lua_rawget (lua_State *L, int index);
+
+ + +

Similar to lua_gettable, but does a raw access +(i.e., without metamethods). + +

+


lua_rawgeti

+
+          void lua_rawgeti (lua_State *L, int index, int n);
+
+ + +

Pushes onto the stack the value t[n], +where t is the value at the given valid index index. +The access is raw; +that is, it does not invoke metamethods. + +

+


lua_rawset

+
+          void lua_rawset (lua_State *L, int index);
+
+ + +

Similar to lua_settable, but does a raw assignment +(i.e., without metamethods). + +

+


lua_rawseti

+
+          void lua_rawseti (lua_State *L, int index, int n);
+
+ + +

Does the equivalent of t[n] = v,
+where t is the value at the given valid index index
+and v is the value at the top of the stack.


This function pops the value from the stack. +The assignment is raw; +that is, it does not invoke metamethods. + +

+


lua_Reader

+
+          typedef const char * (*lua_Reader)
+                               (lua_State *L, void *data, size_t *size);
+
+
+ + +

The reader function used by lua_load. +Every time it needs another piece of the chunk, +lua_load calls the reader, +passing along its data parameter. +The reader must return a pointer to a block of memory +with a new piece of the chunk +and set size to the block size. +The block must exist until the reader function is called again. +To signal the end of the chunk, the reader must return NULL. +The reader function may return pieces of any size greater than zero. + +
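+
+A sketch of a reader that feeds lua_load from an open file (the structure
+FileReader and the buffer size are ours):
+
+       typedef struct FileReader {
+         FILE *f;          /* an open file */
+         char buf[512];
+       } FileReader;
+
+       static const char *file_reader (lua_State *L, void *data, size_t *size) {
+         FileReader *fr = (FileReader *)data;
+         (void)L;   /* not used */
+         *size = fread(fr->buf, 1, sizeof(fr->buf), fr->f);
+         return (*size > 0) ? fr->buf : NULL;   /* NULL signals end of chunk */
+       }
+
+It could be used as lua_load(L, file_reader, &fr, "@script.lua"),
+where fr is a FileReader whose field f is already open.
+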

+


lua_register

+
+          void lua_register (lua_State *L, const char *name, lua_CFunction f);
+
+ + +

Sets the C function f as the new value of global name. +It is defined as a macro: +

+#define lua_register(L,n,f)  (lua_pushcfunction(L, f), lua_setglobal(L, n))
+
+ +

+


lua_remove

+
+          void lua_remove (lua_State *L, int index);
+
+ + +

Removes the element at the given valid index, +shifting down the elements above this index to fill the gap. +Cannot be called with a pseudo-index, +because a pseudo-index is not an actual stack position. + +

+


lua_replace

+
+          void lua_replace (lua_State *L, int index);
+
+ + +

Moves the top element into the given position (and pops it), +without shifting any element +(therefore replacing the value at the given position). + +

+


lua_resume

+
+          int lua_resume (lua_State *L, int narg);
+
+ + +

Starts and resumes a coroutine in a given thread. + +

To start a coroutine, you first create a new thread +(see lua_newthread); +then you push on its stack the main function plus any eventual arguments; +then you call lua_resume, +with narg being the number of arguments. +This call returns when the coroutine suspends or finishes its execution. +When it returns, the stack contains all values passed to lua_yield, +or all values returned by the body function. +lua_resume returns +LUA_YIELD if the coroutine yields, +0 if the coroutine finishes its execution +without errors, +or an error code in case of errors (see lua_pcall). +In case of errors, +the stack is not unwound, +so you can use the debug API over it. +The error message is on the top of the stack. +To restart a coroutine, you put on its stack only the values to +be passed as results from yield, +and then call lua_resume. + +
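+
+A sketch of that cycle (it assumes a global Lua function named producer
+that yields repeatedly; error handling is minimal):
+
+       lua_State *co = lua_newthread(L);
+       int status;
+       lua_getfield(co, LUA_GLOBALSINDEX, "producer");  /* coroutine body */
+       lua_pushnumber(co, 10);                          /* one argument */
+       status = lua_resume(co, 1);                      /* start it */
+       while (status == LUA_YIELD) {
+         /* the values passed to lua_yield are on co's stack here */
+         lua_settop(co, 0);            /* drop them in this sketch */
+         status = lua_resume(co, 0);   /* restart with no values */
+       }
+       if (status != 0)
+         fprintf(stderr, "coroutine error: %s\n", lua_tostring(co, -1));
+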

+


lua_setallocf

+
+          void lua_setallocf (lua_State *L, lua_Alloc f, void *ud);
+
+ + +

Changes the allocator function of a given state to f +with user data ud. + +

+


lua_setfenv

+
+          int lua_setfenv (lua_State *L, int index);
+
+ + +

Pops a table from the stack and sets it as +the new environment for the value at the given index. +If the value at the given index is +neither a function nor a thread nor a userdata, +lua_setfenv returns 0. +Otherwise it returns 1. + +

+


lua_setfield

+
+          void lua_setfield (lua_State *L, int index, const char *k);
+
+ + +

Does the equivalent to t[k] = v,
+where t is the value at the given valid index index
+and v is the value at the top of the stack.


This function pops the value from the stack. +As in Lua, this function may trigger a metamethod +for the "newindex" event (see 2.8). + +

+


lua_setglobal

+
+          void lua_setglobal (lua_State *L, const char *name);
+
+ + +

Pops a value from the stack and +sets it as the new value of global name. +It is defined as a macro: +

+#define lua_setglobal(L,s)   lua_setfield(L, LUA_GLOBALSINDEX, s)
+
+ +

+


lua_setmetatable

+
+          int lua_setmetatable (lua_State *L, int index);
+
+ + +

Pops a table from the stack and +sets it as the new metatable for the value at the given +acceptable index. + +

+


lua_settable

+
+          void lua_settable (lua_State *L, int index);
+
+ + +

Does the equivalent to t[k] = v, +where t is the value at the given valid index index, +v is the value at the top of the stack, +and k is the value just below the top. + +

This function pops both the key and the value from the stack. +As in Lua, this function may trigger a metamethod +for the "newindex" event (see 2.8). + +

+


lua_settop

+
+          void lua_settop (lua_State *L, int index);
+
+ + +

Accepts any acceptable index, or 0, +and sets the stack top to this index. +If the new top is larger than the old one, +then the new elements are filled with nil. +If index is 0, then all stack elements are removed. + +

+


lua_State

+
+          typedef struct lua_State lua_State;
+
+ + +

Opaque structure that keeps the whole state of a Lua interpreter. +The Lua library is fully reentrant: +it has no global variables. +All information about a state is kept in this structure. + +

A pointer to this state must be passed as the first argument to +every function in the library, except to lua_newstate, +which creates a Lua state from scratch. + +

+


lua_status

+
+          int lua_status (lua_State *L);
+
+ + +

Returns the status of the thread L. + +

The status can be 0 for a normal thread, +an error code if the thread finished its execution with an error, +or LUA_YIELD if the thread is suspended. + +

+


lua_toboolean

+
+          int lua_toboolean (lua_State *L, int index);
+
+ + +

Converts the Lua value at the given acceptable index to a C boolean +value (0 or 1). +Like all tests in Lua, +lua_toboolean returns 1 for any Lua value +different from false and nil; +otherwise it returns 0. +It also returns 0 when called with a non-valid index. +(If you want to accept only actual boolean values, +use lua_isboolean to test the value's type.) + +

+


lua_tocfunction

+
+          lua_CFunction lua_tocfunction (lua_State *L, int index);
+
+ + +

Converts a value at the given acceptable index to a C function. +That value must be a C function; +otherwise, returns NULL. + +

+


lua_tointeger

+
+          lua_Integer lua_tointeger (lua_State *L, int idx);
+
+ + +

Converts the Lua value at the given acceptable index +to the signed integral type lua_Integer. +The Lua value must be a number or a string convertible to a number +(see 2.2.1); +otherwise, lua_tointeger returns 0. + +

If the number is not an integer, +it is truncated in some non-specified way. + +

+


lua_tolstring

+
+          const char *lua_tolstring (lua_State *L, int index, size_t *len);
+
+ + +

Converts the Lua value at the given acceptable index to a string +(const char*). +If len is not NULL, +it also sets *len with the string length. +The Lua value must be a string or a number; +otherwise, the function returns NULL. +If the value is a number, +then lua_tolstring also +changes the actual value in the stack to a string. +(This change confuses lua_next +when lua_tolstring is applied to keys during a table traversal.) + +

lua_tolstring returns a fully aligned pointer +to a string inside the Lua state. +This string always has a zero (`\0´) +after its last character (as in C), +but may contain other zeros in its body. +Because Lua has garbage collection, +there is no guarantee that the pointer returned by lua_tolstring +will be valid after the corresponding value is removed from the stack. + +

+


lua_tonumber

+
+          lua_Number lua_tonumber (lua_State *L, int index);
+
+ + +

Converts the Lua value at the given acceptable index +to a number (see lua_Number). +The Lua value must be a number or a string convertible to a number +(see 2.2.1); +otherwise, lua_tonumber returns 0. + +

+


lua_topointer

+
+          const void *lua_topointer (lua_State *L, int index);
+
+ + +

Converts the value at the given acceptable index to a generic +C pointer (void*). +The value may be a userdata, a table, a thread, or a function; +otherwise, lua_topointer returns NULL. +Lua ensures that different objects return different pointers. +There is no direct way to convert the pointer back to its original value. + +

Typically this function is used only for debug information. + +

+


lua_tostring

+
+          const char *lua_tostring (lua_State *L, int index);
+
+ + +

Equivalent to lua_tolstring with len equal to NULL. + +

+


lua_tothread

+
+          lua_State *lua_tothread (lua_State *L, int index);
+
+ + +

Converts the value at the given acceptable index to a Lua thread +(represented as lua_State*). +This value must be a thread; +otherwise, the function returns NULL. + +

+


lua_touserdata

+
+          void *lua_touserdata (lua_State *L, int index);
+
+ + +

If the value at the given acceptable index is a full userdata, +returns its block address. +If the value is a light userdata, +returns its pointer. +Otherwise, returns NULL. + +

+


lua_type

+
+          int lua_type (lua_State *L, int index);
+
+ + +

Returns the type of the value in the given acceptable index, +or LUA_TNONE for a non-valid index +(that is, an index to an "empty" stack position). +The types returned by lua_type are coded by the following constants +defined in lua.h: +LUA_TNIL, +LUA_TNUMBER, +LUA_TBOOLEAN, +LUA_TSTRING, +LUA_TTABLE, +LUA_TFUNCTION, +LUA_TUSERDATA, +LUA_TTHREAD, +and +LUA_TLIGHTUSERDATA. + +

+


lua_typename

+
+          const char *lua_typename (lua_State *L, int tp);
+
+ + +

Returns the name of the type encoded by the value tp,
+which must be one of the values returned by lua_type.


+


lua_Writer

+
+          typedef int (*lua_Writer)
+                          (lua_State *L, const void* p, size_t sz, void* ud);
+
+
+ + +

The writer function used by lua_dump. +Every time it produces another piece of chunk, +lua_dump calls the writer, +passing along the buffer to be written (p), +its size (sz), +and the data parameter supplied to lua_dump. + +

The writer returns an error code: +0 means no errors; +any other value means an error and stops lua_dump from +calling the writer again. + +
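+
+A sketch of a writer that sends the chunk to an open file (the name
+file_writer is ours); it is the natural counterpart of a file-based
+lua_Reader:
+
+       static int file_writer (lua_State *L, const void *p, size_t sz, void *ud) {
+         FILE *out = (FILE *)ud;
+         (void)L;   /* not used */
+         return (fwrite(p, 1, sz, out) != sz);   /* non-zero signals an error */
+       }
+
+With a Lua function on the top of the stack, lua_dump(L, file_writer, out)
+would then write its binary form to out.
+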

+


lua_xmove

+
+          void lua_xmove (lua_State *from, lua_State *to, int n);
+
+ + +

Exchanges values between different threads of the same global state.


This function pops n values from the stack from, +and pushes them onto the stack to. + +

+


lua_yield

+
+          int lua_yield  (lua_State *L, int nresults);
+
+ + +

Yields a coroutine. + +

This function should only be called as the +return expression of a C function, as follows: +

+       return lua_yield (L, nresults);
+
+When a C function calls lua_yield in that way, +the running coroutine suspends its execution, +and the call to lua_resume that started this coroutine returns. +The parameter nresults is the number of values from the stack +that are passed as results to lua_resume. + +

+

3.8 - The Debug Interface

+ +

Lua has no built-in debugging facilities. +Instead, it offers a special interface +by means of functions and hooks. +This interface allows the construction of different +kinds of debuggers, profilers, and other tools +that need "inside information" from the interpreter. + +

+


lua_Debug

+
+          typedef struct lua_Debug {
+            int event;
+            const char *name;           /* (n) */
+            const char *namewhat;       /* (n) */
+            const char *what;           /* (S) */
+            const char *source;         /* (S) */
+            int currentline;            /* (l) */
+            int nups;                   /* (u) number of upvalues */
+            int linedefined;            /* (S) */
+            int lastlinedefined;        /* (S) */
+            char short_src[LUA_IDSIZE]; /* (S) */
+            /* private part */
+            ...
+          } lua_Debug;
+
+
+ + +

A structure used to carry different pieces of +information about an active function. +lua_getstack fills only the private part +of this structure, for later use. +To fill the other fields of lua_Debug with useful information, +call lua_getinfo. + +

The fields of lua_Debug have the following meaning: +

+ +

+


lua_gethook

+
+          lua_Hook lua_gethook (lua_State *L);
+
+ + +

Returns the current hook function. + +

+


lua_gethookcount

+
+          int lua_gethookcount (lua_State *L);
+
+ + +

Returns the current hook count. + +

+


lua_gethookmask

+
+          int lua_gethookmask (lua_State *L);
+
+ + +

Returns the current hook mask. + +

+


lua_getinfo

+
+          int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar);
+
+ + +

Fills the fields of lua_Debug with useful information. + +

This function returns 0 on error +(for instance, an invalid option in what). +Each character in the string what +selects some fields of the structure ar to be filled, +as indicated by the letter in parentheses in the definition of lua_Debug: +`S´ fills in the fields source, linedefined, +lastlinedefined, +and what; +`l´ fills in the field currentline, etc. +Moreover, `f´ pushes onto the stack the function that is +running at the given level. + +

To get information about a function that is not active +(that is, not in the stack), +you push it onto the stack +and start the what string with the character `>´. +For instance, to know in which line a function f was defined, +you can write the following code: +

+       lua_Debug ar;
+       lua_getfield(L, LUA_GLOBALSINDEX, "f");  /* get global `f' */
+       lua_getinfo(L, ">S", &ar);
+       printf("%d\n", ar.linedefined);
+
+ +

+


lua_getlocal

+
+          const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n);
+
+ + +

Gets information about a local variable of a given activation record. +The parameter ar must be a valid activation record that was +filled by a previous call to lua_getstack or +given as argument to a hook (see lua_Hook). +The index n selects which local variable to inspect +(1 is the first parameter or active local variable, and so on, +until the last active local variable). +lua_getlocal pushes the variable's value onto the stack +and returns its name. + +

Variable names starting with `(´ (open parentheses) +represent internal variables +(loop control variables, temporaries, and C function locals). + +

Returns NULL (and pushes nothing) +when the index is greater than +the number of active local variables. + +

+


lua_getstack

+
+          int lua_getstack (lua_State *L, int level, lua_Debug *ar);
+
+ + +

Get information about the interpreter runtime stack. + +

This function fills parts of a lua_Debug structure with +an identification of the activation record +of the function executing at a given level. +Level 0 is the current running function, +whereas level n+1 is the function that has called level n. +When there are no errors, lua_getstack returns 1; +when called with a level greater than the stack depth, +it returns 0. + +

+


lua_getupvalue

+
+          const char *lua_getupvalue (lua_State *L, int funcindex, int n);
+
+ + +

Gets information about a closure's upvalue. +(For Lua functions, +upvalues are the external local variables that the function uses, +and that are consequently included in its closure.) +lua_getupvalue gets the index n of an upvalue, +pushes the upvalue's value onto the stack, +and returns its name. +funcindex points to the closure in the stack. +(Upvalues have no particular order, +as they are active through the whole function. +So, they are numbered in an arbitrary order.) + +

Returns NULL (and pushes nothing) +when the index is greater than the number of upvalues. +For C functions, this function uses the empty string "" +as a name for all upvalues. + +

+


lua_Hook

+
+          typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar);
+
+ + +

Type for debugging hook functions. + +

Whenever a hook is called, its ar argument has its field +event set to the specific event that triggered the hook. +Lua identifies these events with the following constants: +LUA_HOOKCALL, LUA_HOOKRET, +LUA_HOOKTAILRET, LUA_HOOKLINE, +and LUA_HOOKCOUNT. +Moreover, for line events, the field currentline is also set. +To get the value of any other field in ar, +the hook must call lua_getinfo. +For return events, event may be LUA_HOOKRET, +the normal value, or LUA_HOOKTAILRET. +In the latter case, Lua is simulating a return from +a function that did a tail call; +in this case, it is useless to call lua_getinfo. + +

While Lua is running a hook, it disables other calls to hooks. +Therefore, if a hook calls back Lua to execute a function or a chunk, +this execution occurs without any calls to hooks. + +

+


lua_sethook

+
+          int lua_sethook (lua_State *L, lua_Hook func, int mask, int count);
+
+ + +

Sets the debugging hook function. + +

func is the hook function. +mask specifies on which events the hook will be called: +It is formed by a bitwise or of the constants +LUA_MASKCALL, +LUA_MASKRET, +LUA_MASKLINE, +and LUA_MASKCOUNT. +The count argument is only meaningful when the mask +includes LUA_MASKCOUNT. +For each event, the hook is called as explained below: +

+
+  The call hook is called when the interpreter calls a function,
+  just after Lua enters the new function and before the function
+  gets its arguments.
+  The return hook is called when the interpreter returns from a function,
+  just before Lua leaves the function.
+  The line hook is called when the interpreter is about to start the
+  execution of a new line of code,
+  or when it jumps back in the code (even to the same line).
+  This event only happens while Lua is executing a Lua function.
+  The count hook is called after the interpreter executes every
+  count instructions.
+  This event only happens while Lua is executing a Lua function.
+

A hook is disabled by setting mask to zero. + +
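+
+As a sketch (the name line_hook is ours), a line hook that prints each
+source line about to be executed:
+
+       static void line_hook (lua_State *L, lua_Debug *ar) {
+         lua_getinfo(L, "S", ar);   /* fill in the source fields */
+         printf("%s:%d\n", ar->short_src, ar->currentline);
+       }
+
+       /* installation: call the hook on every new line */
+       lua_sethook(L, line_hook, LUA_MASKLINE, 0);
+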

+


lua_setlocal

+
+          const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n);
+
+ + +

Sets the value of a local variable of a given activation record. +Parameters ar and n are as in lua_getlocal +(see lua_getlocal). +lua_setlocal assigns the value at the top of the stack +to the variable and returns its name. +It also pops the value from the stack. + +

Returns NULL (and pops nothing) +when the index is greater than +the number of active local variables. + +

+


lua_setupvalue

+
+          const char *lua_setupvalue (lua_State *L, int funcindex, int n);
+
+ + +

Sets the value of a closure's upvalue. +Parameters funcindex and n are as in lua_getupvalue +(see lua_getupvalue). +It assigns the value at the top of the stack +to the upvalue and returns its name. +It also pops the value from the stack. + +

Returns NULL (and pops nothing) +when the index is greater than the number of upvalues. + +

+

4 - The Auxiliary Library

+ +

+The auxiliary library provides several convenient functions +to interface C with Lua. +While the basic API provides the primitive functions for all +interactions between C and Lua, +the auxiliary library provides higher-level functions for some +common tasks. + +

All functions from the auxiliary library +are defined in header file lauxlib.h and +have a prefix luaL_. + +

All functions in the auxiliary library are built on +top of the basic API, +and so they provide nothing that cannot be done with this API. + +

Several functions in the auxiliary library are used to +check C function arguments. +Their names are always luaL_check* or luaL_opt*. +All of these functions raise an error if the check is not satisfied. +Because the error message is formatted for arguments +(e.g., "bad argument #1"), +you should not use these functions for other stack values. + +

4.1 - Functions and Types

+ +

Here we list all functions and types from the auxiliary library +in alphabetical order. + +

+


luaL_addchar

+
+          void luaL_addchar (luaL_Buffer *B, char c);
+
+ + +

Adds the character c to the buffer B +(see luaL_Buffer). + +

+


luaL_addlstring

+
+          void luaL_addlstring (luaL_Buffer *B, const char *s, size_t l);
+
+ + +

Adds the string pointed to by s with length l to +the buffer B +(see luaL_Buffer). +The string may contain embedded zeros. + +

+


luaL_addsize

+
+          void luaL_addsize (luaL_Buffer *B, size_t n);
+
+ + +

Adds a string of length n previously copied to the +buffer area (see luaL_prepbuffer) to the buffer B +(see luaL_Buffer). + +

+


luaL_addstring

+
+          void luaL_addstring (luaL_Buffer *B, const char *s);
+
+ + +

Adds the zero-terminated string pointed to by s +to the buffer B +(see luaL_Buffer). +The string may not contain embedded zeros. + +

+


luaL_addvalue

+
+          void luaL_addvalue (luaL_Buffer *B);
+
+ + +

Adds the value at the top of the stack +to the buffer B +(see luaL_Buffer). +Pops the value. + +

This is the only function on string buffers that can (and must) +be called with an extra element on the stack, +which is the value to be added to the buffer. + +

+


luaL_argcheck

+
+          void luaL_argcheck (lua_State *L, int cond, int numarg,
+                              const char *extramsg);
+
+ + +

Checks whether cond is true. +If not, raises an error with message +"bad argument #<numarg> to <func> (<extramsg>)", +where func is retrieved from the call stack. + +

+


luaL_argerror

+
+          int luaL_argerror (lua_State *L, int numarg, const char *extramsg);
+
+ + +

Raises an error with message +"bad argument #<numarg> to <func> (<extramsg>)", +where func is retrieved from the call stack. + +

This function never returns, +but it is an idiom to use it as return luaL_argerror ... +in C functions. + +

+


luaL_Buffer

+
+          typedef struct luaL_Buffer luaL_Buffer;
+
+ + +

Type for a string buffer. + +

A string buffer allows C code to build Lua strings piecemeal. +Its pattern of use is as follows: +

+
+  First you declare a variable b of type luaL_Buffer.
+  Then you initialize it with a call luaL_buffinit(L, &b).
+  Then you add string pieces to the buffer calling any of the
+  luaL_add* functions.
+  You finish by calling luaL_pushresult(&b).
+  This call leaves the final string on the top of the stack.
+

During its normal operation, +a string buffer uses a variable number of stack slots. +So, while using a buffer, you cannot assume that you know where +the top of the stack is. +You can use the stack between successive calls to buffer operations +as long as that use is balanced; +that is, +when you call a buffer operation, +the stack is at the same level +it was immediately after the previous buffer operation. +(The only exception to this rule is luaL_addvalue.) +After calling luaL_pushresult the stack is back to its +level when the buffer was initialized, +plus the final string on its top. + +
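+
+A small sketch of that pattern (the function name upper_copy is ours):
+a C function that returns an upper-case copy of its string argument,
+built character by character in a buffer.
+
+       static int upper_copy (lua_State *L) {
+         size_t l, i;
+         const char *s = luaL_checklstring(L, 1, &l);
+         luaL_Buffer b;
+         luaL_buffinit(L, &b);                    /* initialize */
+         for (i = 0; i < l; i++)
+           luaL_addchar(&b, toupper((unsigned char)s[i]));  /* add pieces */
+         luaL_pushresult(&b);                     /* final string on the top */
+         return 1;
+       }
+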

+


luaL_buffinit

+
+          void luaL_buffinit (lua_State *L, luaL_Buffer *B);
+
+ + +

Initializes a buffer B. +This function does not allocate any space; +the buffer must be declared as a variable +(see luaL_Buffer). + +

+


luaL_callmeta

+
+          int luaL_callmeta (lua_State *L, int obj, const char *e);
+
+ + +

Calls a metamethod. + +

If the object at index obj has a metatable and this +metatable has a field e, +this function calls this field and passes the object as its only argument. +In this case this function returns 1 and pushes on the +stack the value returned by the call. +If there is no metatable or no metamethod, +this function returns 0 (without pushing any value on the stack). + +

+


luaL_checkany

+
+          void luaL_checkany (lua_State *L, int narg);
+
+ + +

Checks whether the function has an argument +of any type (including nil) at position narg. + +

+


luaL_checkint

+
+          int luaL_checkint (lua_State *L, int narg);
+
+ + +

Checks whether the function argument narg is a number +and returns this number cast to an int. + +

+


luaL_checkinteger

+
+          lua_Integer luaL_checkinteger (lua_State *L, int narg);
+
+ + +

Checks whether the function argument narg is a number +and returns this number cast to a lua_Integer. + +

+


luaL_checklong

+
+          long luaL_checklong (lua_State *L, int narg);
+
+ + +

Checks whether the function argument narg is a number +and returns this number cast to a long. + +

+


luaL_checklstring

+
+          const char *luaL_checklstring (lua_State *L, int narg, size_t *l);
+
+ + +

Checks whether the function argument narg is a string +and returns this string; +if l is not NULL fills *l +with the string's length. + +

+


luaL_checknumber

+
+          lua_Number luaL_checknumber (lua_State *L, int narg);
+
+ + +

Checks whether the function argument narg is a number +and returns this number. + +

+


luaL_checkoption

+
+          int luaL_checkoption (lua_State *L, int narg, const char *def,
+                                const char *const lst[]);
+
+ + +

Checks whether the function argument narg is a string and +searches for this string in the array lst +(which must be NULL-terminated). +If def is not NULL, +uses def as a default value when +the function has no argument narg or if this argument is nil. + +

Returns the index in the array where the string was found. +Raises an error if the argument is not a string or +if the string cannot be found. + +

This is a useful function for mapping strings to C enums. +The usual convention in Lua libraries is to use strings instead of numbers +to select options. + +

+


luaL_checkstack

+
+          void luaL_checkstack (lua_State *L, int sz, const char *msg);
+
+ + +

Grows the stack size to top + sz elements, +raising an error if the stack cannot grow to that size. +msg is an additional text to go into the error message. + +

+


luaL_checkstring

+
+          const char *luaL_checkstring (lua_State *L, int narg);
+
+ + +

Checks whether the function argument narg is a string +and returns this string. + +

+


luaL_checktype

+
+          void luaL_checktype (lua_State *L, int narg, int t);
+
+ + +

Checks whether the function argument narg has type t. + +

+


luaL_checkudata

+
+          void *luaL_checkudata (lua_State *L, int narg, const char *tname);
+
+ + +

Checks whether the function argument narg is a userdata +of the type tname (see luaL_newmetatable). + +

+


luaL_error

+
+          int luaL_error (lua_State *L, const char *fmt, ...);
+
+ + +

Raises an error. +The error message format is given by fmt +plus any extra arguments, +following the same rules of lua_pushfstring. +It also adds at the beginning of the message the file name and +the line number where the error occurred, +if this information is available. + +

This function never returns, +but it is an idiom to use it as return luaL_error ... +in C functions. + +

+


luaL_getmetafield

+
+          int luaL_getmetafield (lua_State *L, int obj, const char *e);
+
+ + +

Pushes on the stack the field e from the metatable +of the object at index obj. +If the object does not have a metatable, +or if the metatable does not have this field, +returns 0 and pushes nothing. + +

+


luaL_getmetatable

+
+          void luaL_getmetatable (lua_State *L, const char *tname);
+
+ + +

Pushes on the stack the metatable associated with name tname +in the registry (see luaL_newmetatable). + +

+


luaL_gsub

+
+          const char *luaL_gsub (lua_State *L, const char *s,
+                                 const char *p, const char *r);
+
+ + +

Creates a copy of string s by replacing +any occurrence of the string p +with the string r. +Pushes the resulting string on the stack and returns it. + +

+


luaL_loadbuffer

+
+          int luaL_loadbuffer (lua_State *L, const char *buff,
+                               size_t sz, const char *name);
+
+ + +

Loads a buffer as a Lua chunk. +This function uses lua_load to load the chunk in the +buffer pointed to by buff with size sz. + +

This function returns the same results as lua_load. +name is the chunk name, +used for debug information and error messages. + +

+


luaL_loadfile

+
+          int luaL_loadfile (lua_State *L, const char *filename);
+
+ + +

Loads a file as a Lua chunk. +This function uses lua_load to load the chunk in the file +named filename. +If filename is NULL, +then it loads from the standard input. +The first line in the file is ignored if it starts with a #. + +

This function returns the same results as lua_load, +but it has an extra error code LUA_ERRFILE +if it cannot open/read the file. + +
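+
+A sketch of the common "run a file" idiom built from luaL_loadfile and
+lua_pcall (the file name is illustrative):
+
+       if (luaL_loadfile(L, "script.lua") != 0 ||
+           lua_pcall(L, 0, LUA_MULTRET, 0) != 0) {
+         fprintf(stderr, "cannot run script: %s\n", lua_tostring(L, -1));
+         lua_pop(L, 1);   /* remove the error message */
+       }
+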

+


luaL_loadstring

+
+          int luaL_loadstring (lua_State *L, const char *s);
+
+ + +

Loads a string as a Lua chunk. +This function uses lua_load to load the chunk in +the zero-terminated string s. + +

This function returns the same results as lua_load. + +

+


luaL_newmetatable

+
+          int luaL_newmetatable (lua_State *L, const char *tname);
+
+ + +

If the registry already has the key tname, +returns 0. +Otherwise, +creates a new table to be used as a metatable for userdata, +adds it to the registry with key tname, +and returns 1. + +

In both cases pushes on the stack the final value associated +with tname in the registry. + +
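+
+A sketch of the usual userdata pattern built on luaL_newmetatable and
+luaL_checkudata (the type name "mylib.point", the struct, and the
+function names are ours); it assumes luaL_newmetatable(L, "mylib.point")
+was called once when the library was opened:
+
+       typedef struct Point { double x, y; } Point;
+
+       static int new_point (lua_State *L) {
+         lua_Number x = luaL_checknumber(L, 1);
+         lua_Number y = luaL_checknumber(L, 2);
+         Point *p = (Point *)lua_newuserdata(L, sizeof(Point));
+         p->x = x;  p->y = y;
+         luaL_getmetatable(L, "mylib.point");   /* registry["mylib.point"] */
+         lua_setmetatable(L, -2);               /* tag the userdata's type */
+         return 1;
+       }
+
+       static Point *check_point (lua_State *L, int narg) {
+         return (Point *)luaL_checkudata(L, narg, "mylib.point");
+       }
+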

+


luaL_newstate

+
+          lua_State *luaL_newstate (void);
+
+ + +

Creates a new Lua state, calling lua_newstate with an +allocation function based on the standard C realloc function +and setting a panic function (see lua_atpanic) that prints +an error message to the standard error output in case of fatal +errors. + +

Returns the new state, +or NULL if there is a memory allocation error. + +

+


luaL_openlibs

+
+          void luaL_openlibs (lua_State *L);
+
+ + +

Opens all standard Lua libraries into the given state. + +

+


luaL_optint

+
+          int luaL_optint (lua_State *L, int narg, int d);
+
+ + +

If the function argument narg is a number, +returns this number cast to an int. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + +

+


luaL_optinteger

+
+          lua_Integer luaL_optinteger (lua_State *L, int narg, lua_Integer d);
+
+ + +

If the function argument narg is a number, +returns this number cast to a lua_Integer. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + +

+


luaL_optlong

+
+          long luaL_optlong (lua_State *L, int narg, long d);
+
+ + +

If the function argument narg is a number, +returns this number cast to a long. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + +

+


luaL_optlstring

+
+          const char *luaL_optlstring (lua_State *L, int narg,
+                                       const char *d, size_t *l);
+
+ + +

If the function argument narg is a string, +returns this string. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + +

If l is not NULL,
+fills the position *l with the result's length.


+


luaL_optnumber

+
+          lua_Number luaL_optnumber (lua_State *L, int narg, lua_Number d);
+
+ + +

If the function argument narg is a number, +returns this number. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + +

+


luaL_optstring

+
+          const char *luaL_optstring (lua_State *L, int narg, const char *d);
+
+ + +

If the function argument narg is a string, +returns this string. +If this argument is absent or is nil, +returns d. +Otherwise, raises an error. + +

+


luaL_prepbuffer

+
+          char *luaL_prepbuffer (luaL_Buffer *B);
+
+ + +

Returns an address to a space of size LUAL_BUFFERSIZE +where you can copy a string to be added to buffer B +(see luaL_Buffer). +After copying the string into this space you must call +luaL_addsize with the size of the string to actually add +it to the buffer. + +

+


luaL_pushresult

+
+          void luaL_pushresult (luaL_Buffer *B);
+
+ + +

Finishes the use of buffer B leaving the final string on +the top of the stack. + +

+


luaL_ref

+
+          int luaL_ref (lua_State *L, int t);
+
+ + +

Creates and returns a reference, +in the table at index t, +for the object at the top of the stack (and pops the object). + +

A reference is a unique integer key. +As long as you do not manually add integer keys into table t, +luaL_ref ensures the uniqueness of the key it returns. +You can retrieve an object referred by reference r +by calling lua_rawgeti(L, t, r). +Function luaL_unref frees a reference and its associated object. + +

If the object at the top of the stack is nil, +luaL_ref returns the constant LUA_REFNIL. +The constant LUA_NOREF is guaranteed to be different +from any reference returned by luaL_ref. + +
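+
+A sketch of the common pattern of anchoring a Lua callback in the registry
+(the variable names are ours):
+
+       /* assume a Lua function is on the top of the stack */
+       int ref = luaL_ref(L, LUA_REGISTRYINDEX);   /* pops it, returns a key */
+
+       /* ... later, fetch and call the stored function, no arguments ... */
+       lua_rawgeti(L, LUA_REGISTRYINDEX, ref);
+       lua_call(L, 0, 0);
+
+       /* when it is no longer needed, release the reference */
+       luaL_unref(L, LUA_REGISTRYINDEX, ref);
+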

+


luaL_Reg

+
+          typedef struct luaL_Reg {
+            const char *name;
+            lua_CFunction func;
+          } luaL_Reg;
+
+
+ + +

Type for arrays of functions to be registered by
+luaL_register.
+name is the function name and func is a pointer to
+the function.
+Any array of luaL_Reg must end with a sentinel entry
+in which both name and func are NULL.


+


luaL_register

+
+          void luaL_register (lua_State *L, const char *libname,
+                              const luaL_Reg *l);
+
+ + +

Opens a library. + +

When called with libname equal to NULL, +simply registers all functions in the list l +(see luaL_Reg) into the table on the top of the stack. + +

When called with a non-null libname, +creates a new table t, +sets it as the value of the global variable libname, +sets it as the value of package.loaded[libname], +and registers on it all functions in the list l. +If there is a table in package.loaded[libname] or in +variable libname, +reuses this table instead of creating a new one. + +

In any case the function leaves the table +on the top of the stack. + +

+


luaL_typename

+
+          const char *luaL_typename (lua_State *L, int idx);
+
+ + +

Returns the name of the type of the value at index idx. + +

+


luaL_typerror

+
+          int luaL_typerror (lua_State *L, int narg, const char *tname);
+
+ + +

Generates an error with a message like +

+<location>: bad argument <narg> to <function> (<tname> expected, got <realt>)
+
+where <location> is produced by luaL_where, +<function> is the name of the current function, +and <realt> is the type name of the actual argument. + +

+


luaL_unref

+
+          void luaL_unref (lua_State *L, int t, int ref);
+
+ + +

Releases reference ref from the table at index t +(see luaL_ref). +The entry is removed from the table, +so that the referred object can be collected. +The reference ref is also freed to be used again. + +

If ref is LUA_NOREF or LUA_REFNIL, +luaL_unref does nothing. + +

+


luaL_where

+
+          void luaL_where (lua_State *L, int lvl);
+
+ + +

Pushes on the stack a string identifying the current position +of the control at level lvl in the call stack. +Typically this string has the format <chunkname>:<currentline>:. +Level 0 is the running function, +level 1 is the function that called the running function, +etc. + +

This function is used to build a prefix for error messages. + +

+

5 - Standard Libraries

+ +

The standard Lua libraries provide useful functions +that are implemented directly through the C API. +Some of these functions provide essential services to the language +(e.g., type and getmetatable); +others provide access to "outside" services (e.g., I/O); +and others could be implemented in Lua itself, +but are quite useful or have critical performance requirements that +deserve an implementation in C (e.g., sort). + +

All libraries are implemented through the official C API +and are provided as separate C modules. +Currently, Lua has the following standard libraries: the basic library (which includes the coroutine sub-library); the package library; string manipulation; table manipulation; mathematical functions; input and output; operating system facilities; and debug facilities. +

+Except for the basic and package libraries, +each library provides all its functions as fields of a global table +or as methods of its objects. + +

To have access to these libraries, +the C host program must call +luaL_openlibs, +which opens all standard libraries. +Alternatively, +it can open them individually by calling +luaopen_base (for the basic library), +luaopen_package (for the package library), +luaopen_string (for the string library), +luaopen_table (for the table library), +luaopen_math (for the mathematical library), +luaopen_io (for the I/O and the Operating System libraries), +and luaopen_debug (for the debug library). +These functions are declared in lualib.h +and should not be called directly: +you must call them like any other Lua C function, +e.g., by using lua_call. + +

5.1 - Basic Functions

+ +

The basic library provides some core functions to Lua. +If you do not include this library in your application, +you should check carefully whether you need to provide +implementations for some of its facilities. + +


assert (v [, message])

+Issues an error when +the value of its argument v is false (i.e., nil or false); +otherwise, returns all its arguments. +message is an error message; +when absent, it defaults to "assertion failed!" + +


collectgarbage (opt [, arg])

+ +

This function is a generic interface to the garbage collector. +It performs different functions according to its first argument, opt: "collect" (performs a full garbage-collection cycle; this is the default option); "stop" (stops the garbage collector); "restart" (restarts the garbage collector); "count" (returns the total memory in use by Lua, in Kbytes); "step" (performs a garbage-collection step; the step "size" is controlled by arg); "setpause" (sets arg as the new value for the pause of the collector); "setstepmul" (sets arg as the new value for the step multiplier of the collector). +

+ +


dofile (filename)

+Opens the named file and executes its contents as a Lua chunk. +When called without arguments, +dofile executes the contents of the standard input (stdin). +Returns all values returned by the chunk. +In case of errors, dofile propagates the error +to its caller (that is, dofile does not run in protected mode). + +


error (message [, level])

+Terminates the last protected function called +and returns message as the error message. +Function error never returns. + +

Usually, error adds some information about the error position +at the beginning of the message. +The level argument specifies how to get the error position. +With level 1 (the default), the error position is where the +error function was called. +Level 2 points the error to where the function +that called error was called; and so on. +Passing a level 0 avoids the addition of error position information +to the message. + +


_G

+A global variable (not a function) that +holds the global environment (that is, _G._G = _G). +Lua itself does not use this variable; +changing its value does not affect any environment, +nor vice-versa. +(Use setfenv to change environments.) + +


getfenv (f)

+Returns the current environment in use by the function. +f can be a Lua function or a number +that specifies the function at that stack level: +Level 1 is the function calling getfenv. +If the given function is not a Lua function, +or if f is 0, +getfenv returns the global environment. +The default for f is 1. + +


getmetatable (object)

+ +

If object does not have a metatable, returns nil. +Otherwise, +if the object's metatable has a "__metatable" field, +returns the associated value. +Otherwise, returns the metatable of the given object. + +


ipairs (t)

+ +

Returns three values: an iterator function, the table t, and 0, +so that the construction +

+       for i,v in ipairs(t) do ... end
+
+will iterate over the pairs (1,t[1]), (2,t[2]), ..., +up to the first integer key with a nil value in the table. + +

See next for the caveats of modifying the table during its traversal. + +


load (func [, chunkname])

+ +

Loads a chunk using function func to get its pieces. +Each call to func must return a string that concatenates +with previous results. +A return of nil (or no value) signals the end of the chunk. + +

If there are no errors, +returns the compiled chunk as a function; +otherwise, returns nil plus the error message. +The environment of the returned function is the global environment. + +

chunkname is used as the chunk name for error messages +and debug information. + +


loadfile ([filename])

+ +

Similar to load, +but gets the chunk from file filename +or from the standard input, +if no file name is given. + +


loadstring (string [, chunkname])

+ +

Similar to load, +but gets the chunk from the given string. + +

To load and run a given string, use the idiom +

+      assert(loadstring(s))()
+
+ +


next (table [, index])

+ +

Allows a program to traverse all fields of a table. +Its first argument is a table and its second argument +is an index in this table. +next returns the next index of the table +and its associated value. +When called with nil as its second argument, +next returns an initial index +and its associated value. +When called with the last index, +or with nil in an empty table, +next returns nil. +If the second argument is absent, then it is interpreted as nil. +In particular, +you can use next(t) to check whether a table is empty. + +

Lua has no declaration of fields. +There is no difference between a +field not present in a table or a field with value nil. +Therefore, next only considers fields with non-nil values. +The order in which the indices are enumerated is not specified, +even for numeric indices. +(To traverse a table in numeric order, +use a numerical for or the ipairs function.) + +

The behavior of next is undefined if, +during the traversal, +you assign any value to a non-existent field in the table. +You may however modify existing fields. +In particular, you may clear existing fields. + +


pairs (t)

+ +

Returns three values: the next function, the table t, and nil, +so that the construction +

+       for k,v in pairs(t) do ... end
+
+will iterate over all key--value pairs of table t. + +

See next for the caveats of modifying the table during its traversal. + +


pcall (f, arg1, arg2, ...)

+ +

Calls function f with +the given arguments in protected mode. +This means that any error inside f is not propagated; +instead, pcall catches the error +and returns a status code. +Its first result is the status code (a boolean), +which is true if the call succeeds without errors. +In such case, pcall also returns all results from the call, +after this first result. +In case of any error, pcall returns false plus the error message. + +
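For illustration, a small sketch of both outcomes (the anonymous functions are arbitrary examples, not part of the library): +
+       local ok, value = pcall(string.rep, "ab", 2)
+       print(ok, value)   --> true    abab
+
+       local ok, err = pcall(function () error("boom", 0) end)
+       print(ok, err)     --> false   boom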


print (e1, e2, ...)

+Receives any number of arguments, +and prints their values to stdout, +using the tostring function to convert them to strings. +print is not intended for formatted output, +but only as a quick way to show a value, +typically for debugging. +For formatted output, use string.format. + +


rawequal (v1, v2)

+Checks whether v1 is equal to v2, +without invoking any metamethod. +Returns a boolean. + +


rawget (table, index)

+Gets the real value of table[index], +without invoking any metamethod. +table must be a table and +index any value different from nil. + +


rawset (table, index, value)

+Sets the real value of table[index] to value, +without invoking any metamethod. +table must be a table, +index any value different from nil, +and value any Lua value. + +


select (index, ...)

+ +

If index is a number, +returns all arguments after argument number index. +Otherwise, index must be the string "#", +and select returns the total number of extra arguments it received. + +
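For instance: +
+       print(select("#", "a", "b", "c"))   --> 3
+       print(select(2, "a", "b", "c"))     --> b    c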


setfenv (f, table)

+ +

Sets the environment to be used by the given function. +f can be a Lua function or a number +that specifies the function at that stack level: +Level 1 is the function calling setfenv. +setfenv returns the given function. + +

As a special case, when f is 0 setfenv changes +the environment of the running thread. +In this case, setfenv returns no values. + +
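A minimal sketch of replacing a function's environment (the table contents are arbitrary): +
+       local function f () return x end
+       setfenv(f, {x = 10})
+       print(f())   --> 10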


setmetatable (table, metatable)

+ +

Sets the metatable for the given table. +(You cannot change the metatable of other types from Lua, only from C.) +If metatable is nil, +removes the metatable of the given table. +If the original metatable has a "__metatable" field, +raises an error. + +

This function returns table. + +
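For instance, a small sketch using the __index metamethod (the table names are illustrative): +
+       local defaults = {color = "red"}
+       local t = setmetatable({}, {__index = defaults})
+       print(t.color)              --> red
+       print(rawget(t, "color"))   --> nil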


tonumber (e [, base])

+Tries to convert its argument to a number. +If the argument is already a number or a string convertible +to a number, then tonumber returns this number; +otherwise, it returns nil. + +

An optional argument specifies the base to interpret the numeral. +The base may be any integer between 2 and 36, inclusive. +In bases above 10, the letter `A´ (in either upper or lower case) +represents 10, `B´ represents 11, and so forth, +with `Z´ representing 35. +In base 10 (the default), the number may have a decimal part, +as well as an optional exponent part (see 2.1). +In other bases, only unsigned integers are accepted. + +
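A few illustrative calls: +
+       print(tonumber("42"))        --> 42
+       print(tonumber("0x10"))      --> 16
+       print(tonumber("ff", 16))    --> 255
+       print(tonumber("hello"))     --> nil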


tostring (e)

+Receives an argument of any type and +converts it to a string in a reasonable format. +For complete control of how numbers are converted, +use string.format. + +

If the metatable of e has a "__tostring" field, +then tostring calls the corresponding value +with e as argument, +and uses the result of the call as its result. + +


type (v)

+Returns the type of its only argument, coded as a string. +The possible results of this function are +"nil" (a string, not the value nil), +"number", +"string", +"boolean", +"table", +"function", +"thread", +and "userdata". + +


unpack (list [, i [, j]])

+Returns the elements from the given table. +This function is equivalent to +
+  return list[i], list[i+1], ..., list[j]
+
+except that the above code can be written only for a fixed number +of elements. +By default, i is 1 and j is the length of the list, +as defined by the length operator (see 2.5.5). + +
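For instance: +
+       local t = {10, 20, 30}
+       print(unpack(t))             --> 10   20   30
+       print(unpack(t, 2))          --> 20   30
+       print(math.max(unpack(t)))   --> 30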


_VERSION

+A global variable (not a function) that +holds a string containing the current interpreter version. +The current contents of this variable is "Lua 5.1". + +


xpcall (f, err)

+ +

This function is similar to pcall, +except that you can set a new error handler. + +

xpcall calls function f in protected mode, +using err as the error handler. +Any error inside f is not propagated; +instead, xpcall catches the error, +calls the err function with the original error object, +and returns a status code. +Its first result is the status code (a boolean), +which is true if the call succeeds without errors. +In this case, xpcall also returns all results from the call, +after this first result. +In case of any error, +xpcall returns false plus the result from err. + +
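A minimal sketch; the handler function and its message prefix are arbitrary examples: +
+       local function handler (msg)
+         return "handled: " .. msg
+       end
+       print(xpcall(function () error("boom", 0) end, handler))
+       --> false   handled: boom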

5.2 - Coroutine Manipulation

+ +

The operations related to coroutines comprise a sub-library of +the basic library and come inside the table coroutine. +See 2.11 for a general description of coroutines. + +


coroutine.create (f)

+ +

Creates a new coroutine, with body f. +f must be a Lua function. +Returns this new coroutine, +an object with type "thread". + +


coroutine.resume (co [, val1, ..., valn])

+ +

Starts or continues the execution of coroutine co. +The first time you resume a coroutine, +it starts running its body. +The values val1, ..., valn are passed +as the arguments to the body function. +If the coroutine has yielded, +resume restarts it; +the values val1, ..., valn are passed +as the results from the yield. + +

If the coroutine runs without any errors, +resume returns true plus any values passed to yield +(if the coroutine yields) or any values returned by the body function +(if the coroutine terminates). +If there is any error, +resume returns false plus the error message. + +
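For illustration, a small sketch of how values flow between resume and yield: +
+       local co = coroutine.create(function (a, b)
+         local c = coroutine.yield(a + b)   -- yields 3, later resumed with 4
+         return c * 10
+       end)
+       print(coroutine.resume(co, 1, 2))   --> true    3
+       print(coroutine.resume(co, 4))      --> true    40
+       print(coroutine.status(co))         --> dead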


coroutine.running ()

+ +

Returns the running coroutine, +or nil when called by the main thread. + +


coroutine.status (co)

+ +

Returns the status of coroutine co, as a string: +"running", +if the coroutine is running (that is, it called status); +"suspended", if the coroutine is suspended in a call to yield, +or if it has not started running yet; +"normal" if the coroutine is active but not running +(that is, it has resumed another coroutine); +and "dead" if the coroutine has finished its body function, +or if it has stopped with an error. + +


coroutine.wrap (f)

+ +

Creates a new coroutine, with body f. +f must be a Lua function. +Returns a function that resumes the coroutine each time it is called. +Any arguments passed to the function behave as the +extra arguments to resume. +Returns the same values returned by resume, +except the first boolean. +In case of error, propagates the error. + +


coroutine.yield ([val1, ..., valn])

+ +

Suspends the execution of the calling coroutine. +The coroutine cannot be running a C function, +a metamethod, or an iterator. +Any arguments to yield are passed as extra results to resume. + +

5.3 - Modules

+ +

The package library provides basic +facilities for loading and building modules in Lua. +It exports two of its functions directly in the global environment: +require and module. +Everything else is exported in a table package. + +


module (name [, ...])

+ +

Creates a module. +If there is a table in package.loaded[name], +this table is the module. +Otherwise, if there is a global table t with the given name, +this table is the module. +Otherwise creates a new table t and +sets it as the value of the global name and +the value of package.loaded[name]. +This function also initializes t._NAME with the given name, +t._M with the module (t itself), +and t._PACKAGE with the package name +(the full module name minus last component; see below). +Finally, module sets t as the new environment +of the current function and the new value of package.loaded[name], +so that require returns t. + +

If name is a compound name +(that is, one with components separated by dots), +module creates (or reuses, if they already exist) +tables for each component. +For instance, if name is a.b.c, +then module stores the module table in field c of +field b of global a. + +

This function may receive optional options after +the module name, +where each option is a function to be applied over the module. + +
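As an illustrative sketch, a hypothetical file hello.lua could define a module and client code could then use it: +
+       -- hello.lua (hypothetical file)
+       module("hello", package.seeall)
+       function greet () return "hello from module" end
+
+       -- client code
+       require("hello")
+       print(hello.greet())   --> hello from module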


require (modname)

+ +

Loads the given module. +The function starts by looking into the table package.loaded +to determine whether modname is already loaded. +If it is, then require returns the value stored +at package.loaded[modname]. +Otherwise, it tries to find a loader for the module. + +

To find a loader, +first require queries package.preload[modname]. +If it has a value, +this value (which should be a function) is the loader. +Otherwise require searches for a Lua loader using the +path stored in package.path. +If that also fails, it searches for a C loader using the +path stored in package.cpath. +If that also fails, +it tries an all-in-one loader (see below). + +

When loading a C library, +require first uses a dynamic link facility to link the +application with the library. +Then it tries to find a C function inside this library to +be used as the loader. +The name of this C function is the string "luaopen_" +concatenated with a copy of the module name where each dot +is replaced by an underscore. +Moreover, if the module name has a hyphen, +its prefix up to (and including) the first hyphen is removed. +For instance, if the module name is a.v1-b.c, +the function name will be luaopen_b_c. + +

If require finds neither a Lua library nor a +C library for a module, +it calls the all-in-one loader. +This loader searches the C path for a library for +the root name of the given module. +For instance, when requiring a.b.c, +it will search for a C library for a. +If found, it looks into it for an open function for +the submodule; +in our example, that would be luaopen_a_b_c. +With this facility, a package can pack several C submodules +into one single library, +with each submodule keeping its original open function. + +

Once a loader is found, +require calls the loader with a single argument, modname. +If the loader returns any value, +require assigns it to package.loaded[modname]. +If the loader returns no value and +has not assigned any value to package.loaded[modname], +then require assigns true to this entry. +In any case, require returns the +final value of package.loaded[modname]. + +

If there is any error loading or running the module, +or if it cannot find any loader for the module, +then require signals an error. + +


package.cpath

+ +

The path used by require to search for a C loader. + +

Lua initializes the C path package.cpath in the same way +it initializes the Lua path package.path, +using the environment variable LUA_CPATH +(plus another default path defined in luaconf.h). + +


package.loaded

+ +

A table used by require to control which +modules are already loaded. +When you require a module modname and +package.loaded[modname] is not false, +require simply returns the value stored there. + +


package.loadlib (libname, funcname)

+ +

Dynamically links the host program with the C library libname. +Inside this library, looks for a function funcname +and returns this function as a C function. +(So, funcname must follow the protocol (see lua_CFunction)). + +

This is a low-level function. +It completely bypasses the package and module system. +Unlike require, +it does not perform any path searching and +does not automatically add extensions. +libname must be the complete file name of the C library, +including if necessary a path and extension. +funcname must be the exact name exported by the C library +(which may depend on the C compiler and linker used). + +

This function is not supported by ANSI C. +As such, it is only available on some platforms +(Windows, Linux, Mac OS X, Solaris, BSD, +plus other Unix systems that support the dlfcn standard). + +


package.path

+ +

The path used by require to search for a Lua loader. + +

At start-up, Lua initializes this variable with +the value of the environment variable LUA_PATH or +with a default path defined in luaconf.h, +if the environment variable is not defined. +Any ";;" in the value of the environment variable +is replaced by the default path. + +

A path is a sequence of templates separated by semicolons. +For each template, require will change each interrogation +mark in the template by filename, +which is modname with each dot replaced by a +"directory separator" (such as "/" in Unix); +then it will try to load the resulting file name. +So, for instance, if the Lua path is +

+  "./?.lua;./?.lc;/usr/local/?/init.lua"
+
+the search for a Lua loader for module foo +will try to load the files +./foo.lua, ./foo.lc, and +/usr/local/foo/init.lua, in that order. + +


package.preload

+ +

A table to store loaders for specific modules +(see require). + +
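For instance, a small sketch that registers a loader in advance (the module name is arbitrary): +
+       package.preload["answer"] = function ()
+         return { value = 42 }
+       end
+       print(require("answer").value)   --> 42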


package.seeall (module)

+ +

Sets a metatable for module with +its __index field referring to the global environment, +so that this module inherits values +from the global environment. +To be used as an option to function module. + +

5.4 - String Manipulation

+ +

This library provides generic functions for string manipulation, +such as finding and extracting substrings, and pattern matching. +When indexing a string in Lua, the first character is at position 1 +(not at 0, as in C). +Indices are allowed to be negative and are interpreted as indexing backwards, +from the end of the string. +Thus, the last character is at position -1, and so on. + +

The string library provides all its functions inside the table +string. +It also sets a metatable for strings +where the __index field points to the metatable itself. +Therefore, you can use the string functions in object-oriented style. +For instance, string.byte(s, i) +can be written as s:byte(i). + +


string.byte (s [, i [, j]])

+Returns the internal numerical codes of the characters s[i], +s[i+1], ..., s[j]. +The default value for i is 1; +the default value for j is i. + +

Note that numerical codes are not necessarily portable across platforms. + +


string.char (i1, i2, ...)

+Receives 0 or more integers. +Returns a string with length equal to the number of arguments, +in which each character has the internal numerical code equal +to its corresponding argument. + +

Note that numerical codes are not necessarily portable across platforms. + +
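For instance, on an ASCII-based platform: +
+       print(string.byte("ABC", 1, 3))   --> 65   66   67
+       print(string.char(104, 105))      --> hi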


string.dump (function)

+ +

Returns a string containing a binary representation of the given function, +so that a later loadstring on this string returns +a copy of the function. +function must be a Lua function without upvalues. + +


string.find (s, pattern [, init [, plain]])

+Looks for the first match of +pattern in the string s. +If it finds a match, then find returns the indices of s +where this occurrence starts and ends; +otherwise, it returns nil. +A third, optional numerical argument init specifies +where to start the search; +its default value is 1 and may be negative. +A value of true as a fourth, optional argument plain +turns off the pattern matching facilities, +so the function does a plain "find substring" operation, +with no characters in pattern being considered "magic". +Note that if plain is given, then init must be given as well. + +

If the pattern has captures, +then in a successful match +the captured values are also returned, +after the two indices. + +
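A few illustrative calls: +
+       print(string.find("hello world", "o w"))       --> 5    7
+       print(string.find("hello world", "(l+)(o)"))   --> 3    5    ll   o
+       print(string.find("a.b.c", ".", 1, true))      --> 2    2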


string.format (formatstring, e1, e2, ...)

+Returns a formatted version of its variable number of arguments +following the description given in its first argument (which must be a string). +The format string follows the same rules as the printf family of +standard C functions. +The only differences are that the options/modifiers +*, l, L, n, p, +and h are not supported +and that there is an extra option, q. +The q option formats a string in a form suitable to be safely read +back by the Lua interpreter: +The string is written between double quotes, +and all double quotes, newlines, embedded zeros, +and backslashes in the string +are correctly escaped when written. +For instance, the call +
+       string.format('%q', 'a string with "quotes" and \n new line')
+
+will produce the string: +
+"a string with \"quotes\" and \
+ new line"
+
+ +

The options c, d, E, e, f, +g, G, i, o, u, X, and x all +expect a number as argument, +whereas q and s expect a string. + +

This function does not accept string values +containing embedded zeros. + +


string.gmatch (s, pattern)

+Returns an iterator function that, +each time it is called, +returns the next captures from pattern over string s. + +

If pattern specifies no captures, +then the whole match is produced in each call. + +

As an example, the following loop +

+  s = "hello world from Lua"
+  for w in string.gmatch(s, "%a+") do
+    print(w)
+  end
+
+will iterate over all the words from string s, +printing one per line. +The next example collects all pairs key=value from the +given string into a table: +
+  t = {}
+  s = "from=world, to=Lua"
+  for k, v in string.gmatch(s, "(%w+)=(%w+)") do
+    t[k] = v
+  end
+
+ +


string.gsub (s, pattern, repl [, n])

+Returns a copy of s +in which all occurrences of the pattern have been +replaced by a replacement string specified by repl, +which may be a string, a table, or a function. +gsub also returns, as its second value, +the total number of substitutions made. + +

If repl is a string, then its value is used for replacement. +The character % works as an escape character: +Any sequence in repl of the form %n, +with n between 1 and 9, +stands for the value of the n-th captured substring (see below). +The sequence %0 stands for the whole match. +The sequence %% stands for a single %. + +

If repl is a table, then the table is queried for every match, +using the first capture as the key; +if the pattern specifies no captures, +then the whole match is used as the key. + +

If repl is a function, then this function is called every time a +match occurs, with all captured substrings passed as arguments, +in order; +if the pattern specifies no captures, +then the whole match is passed as a sole argument. + +

If the value returned by the table query or by the function call +is a string or a number, +then it is used as the replacement string; +otherwise, if it is false or nil, +then there is no replacement +(that is, the original match is kept in the string). + +

The optional last parameter n limits +the maximum number of substitutions to occur. +For instance, when n is 1 only the first occurrence of +pattern is replaced. + +

Here are some examples: +

+   x = string.gsub("hello world", "(%w+)", "%1 %1")
+   --> x="hello hello world world"
+
+   x = string.gsub("hello world", "%w+", "%0 %0", 1)
+   --> x="hello hello world"
+
+   x = string.gsub("hello world from Lua", "(%w+)%s*(%w+)", "%2 %1")
+   --> x="world hello Lua from"
+
+   x = string.gsub("home = $HOME, user = $USER", "%$(%w+)", os.getenv)
+   --> x="home = /home/roberto, user = roberto"
+
+   x = string.gsub("4+5 = $return 4+5$", "%$(.-)%$", function (s)
+         return loadstring(s)()
+       end)
+   --> x="4+5 = 9"
+
+   local t = {name="lua", version="5.1"}
+   x = string.gsub("$name%-$version.tar.gz", "%$(%w+)", t)
+   --> x="lua-5.1.tar.gz"
+
+ +


string.len (s)

+Receives a string and returns its length. +The empty string "" has length 0. +Embedded zeros are counted, +so "a\000bc\000" has length 5. + +


string.lower (s)

+Receives a string and returns a copy of this string with all +uppercase letters changed to lowercase. +All other characters are left unchanged. +The definition of what an uppercase letter is depends on the current locale. + +


string.match (s, pattern [, init])

+Looks for the first match of +pattern in the string s. +If it finds one, then match returns +the captures from the pattern; +otherwise it returns nil. +If pattern specifies no captures, +then the whole match is returned. +A third, optional numerical argument init specifies +where to start the search; +its default value is 1 and may be negative. + +


string.rep (s, n)

+Returns a string that is the concatenation of n copies of +the string s. + +


string.reverse (s)

+Returns a string that is the string s reversed. + +


string.sub (s, i [, j])

+Returns the substring of s that +starts at i and continues until j; +i and j may be negative. +If j is absent, then it is assumed to be equal to -1 +(which is the same as the string length). +In particular, +the call string.sub(s,1,j) returns a prefix of s +with length j, +and string.sub(s, -i) returns a suffix of s +with length i. + +
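For instance: +
+       local s = "hello Lua"
+       print(string.sub(s, 1, 5))   --> hello
+       print(string.sub(s, -3))     --> Lua
+       print(s:sub(7, 9))           --> Lua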


string.upper (s)

+Receives a string and returns a copy of this string with all +lowercase letters changed to uppercase. +All other characters are left unchanged. +The definition of what a lowercase letter is depends on the current locale. + +

Patterns

+ +

+A character class is used to represent a set of characters. +The following combinations are allowed in describing a character class: +

+For all classes represented by single letters (%a, %c, etc.), +the corresponding uppercase letter represents the complement of the class. +For instance, %S represents all non-space characters. + +

The definitions of letter, space, and other character groups +depend on the current locale. +In particular, the class [a-z] may not be equivalent to %l. + +

+A pattern item may be +

+ +

+A pattern is a sequence of pattern items. +A `^´ at the beginning of a pattern anchors the match at the +beginning of the subject string. +A `$´ at the end of a pattern anchors the match at the +end of the subject string. +At other positions, +`^´ and `$´ have no special meaning and represent themselves. + +

+A pattern may contain sub-patterns enclosed in parentheses; +they describe captures. +When a match succeeds, the substrings of the subject string +that match captures are stored (captured) for future use. +Captures are numbered according to their left parentheses. +For instance, in the pattern "(a*(.)%w(%s*))", +the part of the string matching "a*(.)%w(%s*)" is +stored as the first capture (and therefore has number 1); +the character matching "." is captured with number 2, +and the part matching "%s*" has number 3. + +

As a special case, the empty capture () captures +the current string position (a number). +For instance, if we apply the pattern "()aa()" on the +string "flaaap", there will be two captures: 3 and 5. + +

A pattern cannot contain embedded zeros. Use %z instead. + +

5.5 - Table Manipulation

+This library provides generic functions for table manipulation. +It provides all its functions inside the table table. + +

Most functions in the table library assume that the table +represents an array or a list. +For these functions, when we talk about the "length" of a table +we mean the result of the length operator. + +


table.concat (table [, sep [, i [, j]]])

+Returns table[i]..sep..table[i+1] ... sep..table[j]. +The default value for sep is the empty string, +the default for i is 1, +and the default for j is the length of the table. +If i is greater than j, returns the empty string. + +
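For instance: +
+       local t = {"a", "b", "c", "d"}
+       print(table.concat(t))              --> abcd
+       print(table.concat(t, ", "))        --> a, b, c, d
+       print(table.concat(t, "-", 2, 3))   --> b-c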


table.insert (table, [pos,] value)

+ +

Inserts element value at position pos in table, +shifting up other elements to open space, if necessary. +The default value for pos is n+1, +where n is the length of the table (see 2.5.5), +so that a call table.insert(t,x) inserts x at the end +of table t. + +


table.maxn (table)

+ +

Returns the largest positive numerical index of the given table, +or zero if the table has no positive numerical indices. +(To do its job this function does a linear traversal of +the whole table.) + +


table.remove (table [, pos])

+ +

Removes from table the element at position pos, +shifting down other elements to close the space, if necessary. +Returns the value of the removed element. +The default value for pos is n, +where n is the length of the table, +so that a call table.remove(t) removes the last element +of table t. + +
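A small sketch combining table.insert and table.remove: +
+       local t = {"a", "c"}
+       table.insert(t, "d")        -- t is now {"a", "c", "d"}
+       table.insert(t, 2, "b")     -- t is now {"a", "b", "c", "d"}
+       print(table.remove(t, 1))   --> a     (t is now {"b", "c", "d"})
+       print(table.remove(t))      --> d     (t is now {"b", "c"})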


table.sort (table [, comp])

+Sorts table elements in a given order, in-place, +from table[1] to table[n], +where n is the length of the table. +If comp is given, +then it must be a function that receives two table elements, +and returns true +when the first is less than the second +(so that not comp(a[i+1],a[i]) will be true after the sort). +If comp is not given, +then the standard Lua operator < is used instead. + +

The sort algorithm is not stable; +that is, elements considered equal by the given order +may have their relative positions changed by the sort. + +
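For instance: +
+       local t = {3, 1, 2}
+       table.sort(t)
+       print(table.concat(t, " "))   --> 1 2 3
+       table.sort(t, function (a, b) return a > b end)
+       print(table.concat(t, " "))   --> 3 2 1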

5.6 - Mathematical Functions

+ +

This library is an interface to the standard C math library. +It provides all its functions inside the table math. +The library provides the following functions: +

+       math.abs     math.acos    math.asin    math.atan    math.atan2
+       math.ceil    math.cos     math.cosh    math.deg     math.exp
+       math.floor   math.fmod    math.frexp   math.ldexp   math.log
+       math.log10   math.max     math.min     math.modf    math.pow
+       math.rad     math.random  math.randomseed           math.sin
+       math.sinh    math.sqrt    math.tan     math.tanh
+
+plus a variable math.pi and +a variable math.huge, +with the value HUGE_VAL. +Most of these functions +are only interfaces to the corresponding functions in the C library. +All trigonometric functions work in radians. +The functions math.deg and math.rad convert +between radians and degrees. + +

The function math.max returns the maximum +value of its numeric arguments. +Similarly, math.min computes the minimum. +Both can be used with 1, 2, or more arguments. + +

The function math.modf corresponds to the modf C function. +It returns two values: +The integral part and the fractional part of its argument. +The function math.frexp also returns 2 values: +The normalized fraction and the exponent of its argument. + +

The functions math.random and math.randomseed +are interfaces to the simple random generator functions +rand and srand that are provided by ANSI C. +(No guarantees can be given for their statistical properties.) +When called without arguments, +math.random returns a pseudo-random real number +in the range [0,1). +When called with a number n, +math.random returns +a pseudo-random integer in the range [1,n]. +When called with two arguments, +l and u, +math.random returns a pseudo-random +integer in the range [l,u]. +The math.randomseed function sets a "seed" +for the pseudo-random generator: +Equal seeds produce equal sequences of numbers. + +
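For instance (the actual values depend on the seed): +
+       math.randomseed(os.time())
+       print(math.random())         -- a real number in [0,1)
+       print(math.random(6))        -- an integer in [1,6]
+       print(math.random(10, 20))   -- an integer in [10,20]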

5.7 - Input and Output Facilities

+ +

The I/O library provides two different styles for file manipulation. +The first one uses implicit file descriptors; +that is, there are operations to set a default input file and a +default output file, +and all input/output operations are over these default files. +The second style uses explicit file descriptors. + +

When using implicit file descriptors, +all operations are supplied by table io. +When using explicit file descriptors, +the operation io.open returns a file descriptor +and then all operations are supplied as methods of the file descriptor. + +

The table io also provides +three predefined file descriptors with their usual meanings from C: +io.stdin, io.stdout, and io.stderr. + +

Unless otherwise stated, +all I/O functions return nil on failure +(plus an error message as a second result) +and some value different from nil on success. + +


io.close ([file])

+ +

Equivalent to file:close(). +Without a file, closes the default output file. + +


io.flush ()

+ +

Equivalent to file:flush over the default output file. + +


io.input ([file])

+ +

When called with a file name, it opens the named file (in text mode), +and sets its handle as the default input file. +When called with a file handle, +it simply sets this file handle as the default input file. +When called without parameters, +it returns the current default input file. + +

In case of errors this function raises the error, +instead of returning an error code. + +


io.lines ([filename])

+ +

Opens the given file name in read mode +and returns an iterator function that, +each time it is called, +returns a new line from the file. +Therefore, the construction +

+       for line in io.lines(filename) do ... end
+
+will iterate over all lines of the file. +When the iterator function detects the end of file, +it returns nil (to finish the loop) and automatically closes the file. + +

The call io.lines() (without a file name) is equivalent +to io.input():lines(); +that is, it iterates over the lines of the default input file. +In this case it does not close the file when the loop ends. + +


io.open (filename [, mode])

+ +

This function opens a file, +in the mode specified in the string mode. +It returns a new file handle, +or, in case of errors, nil plus an error message. + +

The mode string can be any of the following: "r" (read mode, the default); "w" (write mode); "a" (append mode); "r+" (update mode, all previous data is preserved); "w+" (update mode, all previous data is erased); "a+" (append update mode, previous data is preserved, writing is only allowed at the end of file). +

+The mode string may also have a `b´ at the end, +which is needed in some systems to open the file in binary mode. +This string is exactly what is used in the +standard C function fopen. + +
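As a small sketch of the explicit-descriptor style (the file name test.txt is arbitrary and error handling is reduced to assert): +
+       local f = assert(io.open("test.txt", "w"))
+       f:write("line one\n", "line two\n")
+       f:close()
+       f = assert(io.open("test.txt", "r"))
+       print(f:read("*l"))   --> line one
+       print(f:read("*a"))   --> line two   (plus its trailing newline)
+       f:close()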


io.output ([file])

+ +

Similar to io.input, but operates over the default output file. + +


io.popen ([prog [, mode]])

+ +

Starts program prog in a separate process and returns +a file handle that you can use to read data from this program +(if mode is "r", the default) +or to write data to this program +(if mode is "w"). + +

This function is system dependent and is not available +on all platforms. + +


io.read (format1, ...)

+ +

Equivalent to io.input():read. + +


io.tmpfile ()

+ +

Returns a handle for a temporary file. +This file is opened in update mode +and it is automatically removed when the program ends. + +


io.type (obj)

+ +

Checks whether obj is a valid file handle. +Returns the string "file" if obj is an open file handle, +"closed file" if obj is a closed file handle, +or nil if obj is not a file handle. + +


io.write (value1, ...)

+ +

Equivalent to io.output():write. + +


file:close ()

+ +

Closes file. +Note that files are automatically closed when +their handles are garbage collected, +but that takes an unpredictable amount of time to happen. + +


file:flush ()

+ +

Saves any written data to file. + +


file:lines ()

+ +

Returns an iterator function that, +each time it is called, +returns a new line from the file. +Therefore, the construction +

+       for line in file:lines() do ... end
+
+will iterate over all lines of the file. +(Unlike io.lines, this function does not close the file +when the loop ends.) + +


file:read (format1, ...)

+ +

Reads the file file, +according to the given formats, which specify what to read. +For each format, +the function returns a string (or a number) with the characters read, +or nil if it cannot read data with the specified format. +When called without formats, +it uses a default format that reads the entire next line +(see below). + +

The available formats are: "*n" (reads a number; this is the only format that returns a number instead of a string); "*a" (reads the whole file, starting at the current position; on end of file, it returns the empty string); "*l" (reads the next line, skipping the end of line, returning nil on end of file; this is the default format); or a number (reads a string with up to this number of characters, returning nil on end of file). +

+ +


file:seek ([whence] [, offset])

+ +

Sets and gets the file position, +measured from the beginning of the file, +to the position given by offset plus a base +specified by the string whence, as follows: "set" (base is position 0, the beginning of the file); "cur" (base is the current position); "end" (base is the end of the file). +

+In case of success, function seek returns the final file position, +measured in bytes from the beginning of the file. +If this function fails, it returns nil, +plus a string describing the error. + +

The default value for whence is "cur", +and for offset is 0. +Therefore, the call file:seek() returns the current +file position, without changing it; +the call file:seek("set") sets the position to the +beginning of the file (and returns 0); +and the call file:seek("end") sets the position to the +end of the file, and returns its size. + +


file:setvbuf (mode [, size])

+ +

Sets the buffering mode for an output file. +There are three available modes: "no" (no buffering; the result of any output operation appears immediately); "full" (full buffering; output is performed only when the buffer is full or when you explicitly flush the file); "line" (line buffering; output is buffered until a newline is output or there is any input from some special files, such as a terminal device). +

+For the last two cases, size +specifies the size of the buffer, in bytes. +The default is an appropriate size. + +


file:write (value1, ...)

+ +

Writes the value of each of its arguments to +the file. +The arguments must be strings or numbers. +To write other values, +use tostring or string.format before write. + +

5.8 - Operating System Facilities

+ +

This library is implemented through table os. + +


os.clock ()

+ +

Returns an approximation of the amount in seconds of CPU time +used by the program. + +


os.date ([format [, time]])

+ +

Returns a string or a table containing date and time, +formatted according to the given string format. + +

If the time argument is present, +this is the time to be formatted +(see the os.time function for a description of this value). +Otherwise, date formats the current time. + +

If format starts with `!´, +then the date is formatted in Coordinated Universal Time. +After this optional character, +if format is *t, +then date returns a table with the following fields: +year (four digits), month (1--12), day (1--31), +hour (0--23), min (0--59), sec (0--61), +wday (weekday, Sunday is 1), +yday (day of the year), +and isdst (daylight saving flag, a boolean). + +

If format is not *t, +then date returns the date as a string, +formatted according to the same rules as the C function strftime. + +

When called without arguments, +date returns a reasonable date and time representation that depends on +the host system and on the current locale +(that is, os.date() is equivalent to os.date("%c")). + +


os.difftime (t2, t1)

+ +

Returns the number of seconds from time t1 to time t2. +In POSIX, Windows, and some other systems, +this value is exactly t2-t1. + +


os.execute ([command])

+ +

This function is equivalent to the C function system. +It passes command to be executed by an operating system shell. +It returns a status code, which is system-dependent. +If command is absent, then it returns nonzero if a shell is available +and zero otherwise. + +


os.exit ([code])

+ +

Calls the C function exit, +with an optional code, +to terminate the host program. +The default value for code is the success code. + +


os.getenv (varname)

+ +

Returns the value of the process environment variable varname, +or nil if the variable is not defined. + +


os.remove (filename)

+ +

Deletes the file or directory with the given name. +Directories must be empty to be removed. +If this function fails, it returns nil, +plus a string describing the error. + +


os.rename (oldname, newname)

+ +

Renames file or directory named oldname to newname. +If this function fails, it returns nil, +plus a string describing the error. + +


os.setlocale (locale [, category])

+ +

Sets the current locale of the program. +locale is a string specifying a locale; +category is an optional string describing which category to change: +"all", "collate", "ctype", +"monetary", "numeric", or "time"; +the default category is "all". +The function returns the name of the new locale, +or nil if the request cannot be honored. + +


os.time ([table])

+ +

Returns the current time when called without arguments, +or a time representing the date and time specified by the given table. +This table must have fields year, month, and day, +and may have fields hour, min, sec, and isdst +(for a description of these fields, see the os.date function). + +

The returned value is a number, whose meaning depends on your system. +In POSIX, Windows, and some other systems, this number counts the number +of seconds since some given start time (the "epoch"). +In other systems, the meaning is not specified, +and the number returned by time can be used only as an argument to +date and difftime. + +
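For illustration (assuming the system clock is set after the given date): +
+       local t = os.time{year = 2000, month = 1, day = 1, hour = 12}
+       print(os.date("%Y-%m-%d %H:%M", t))     --> 2000-01-01 12:00
+       print(os.date("*t", t).month)           --> 1
+       print(os.difftime(os.time(), t) >= 0)   --> true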


os.tmpname ()

+ +

Returns a string with a file name that can +be used for a temporary file. +The file must be explicitly opened before its use +and explicitly removed when no longer needed. + +

5.9 - The Debug Library

+ +

This library provides +the functionality of the debug interface to Lua programs. +You should exert care when using this library. +The functions provided here should be used exclusively for debugging +and similar tasks, such as profiling. +Please resist the temptation to use them as a +usual programming tool: +They can be very slow. +Moreover, several of its functions +violate some assumptions about Lua code +(e.g., that variables local to a function +cannot be accessed from outside or +that userdata metatables cannot be changed by Lua code) +and therefore can compromise otherwise secure code. + +

All functions in this library are provided +inside the debug table. + +


debug.debug ()

+ +

Enters an interactive mode with the user, +running each string that the user enters. +Using simple commands and other debug facilities, +the user can inspect global and local variables, +change their values, evaluate expressions, and so on. +A line containing only the word cont finishes this function, +so that the caller continues its execution. + +

Note that commands for debug.debug are not lexically nested +within any function, and so have no direct access to local variables. + +


debug.getfenv (o)

+Returns the environment of object o. + +


debug.gethook ()

+ +

Returns the current hook settings, as three values: +the current hook function, the current hook mask, +and the current hook count +(as set by the debug.sethook function). + +


debug.getinfo (function [, what])

+ +

Returns a table with information about a function. +You can give the function directly, +or you can give a number as the value of function, +which means the function running at level function of the call stack: +Level 0 is the current function (getinfo itself); +level 1 is the function that called getinfo; +and so on. +If function is a number larger than the number of active functions, +then getinfo returns nil. + +

The returned table contains all the fields returned by lua_getinfo, +with the string what describing which fields to fill in. +The default for what is to get all information available. +If present, +the option `f´ +adds a field named func with the function itself. + +

For instance, the expression debug.getinfo(1,"n").name returns +a name of the current function, if a reasonable name can be found, +and debug.getinfo(print) returns a table with all available information +about the print function. + +


debug.getlocal (level, local)

+ +

This function returns the name and the value of the local variable +with index local of the function at level level of the stack. +(The first parameter or local variable has index 1, and so on, +until the last active local variable.) +The function returns nil if there is no local +variable with the given index, +and raises an error when called with a level out of range. +(You can call debug.getinfo to check whether the level is valid.) + +

Variable names starting with `(´ (open parentheses) +represent internal variables +(loop control variables, temporaries, and C function locals). + +


debug.getmetatable (object)

+ +

Returns the metatable of the given object +or nil if it does not have a metatable. + +


debug.getregistry ()

+ +

Returns the registry table (see 3.5). + +


debug.getupvalue (func, up)

+ +

This function returns the name and the value of the upvalue +with index up of the function func. +The function returns nil if there is no upvalue with the given index. + +


debug.setfenv (object, table)

+ +

Sets the environment of the given object to the given table. + +


debug.sethook (hook, mask [, count])

+ +

Sets the given function as a hook. +The string mask and the number count describe +when the hook will be called. +The string mask may have the following characters, +with the given meaning: "c" (the hook is called every time Lua calls a function); "r" (the hook is called every time Lua returns from a function); "l" (the hook is called every time Lua enters a new line of code). +

+With a count different from zero, +the hook is called after every count instructions. + +

When called without arguments, +debug.sethook turns off the hook. + +

When the hook is called, its first parameter is a string +describing the event that has triggered its call: +"call", "return" (or "tail return"), +"line", and "count". +For line events, +the hook also gets the new line number as its second parameter. +Inside a hook, +you can call getinfo with level 2 to get more information about +the running function +(level 0 is the getinfo function, +and level 1 is the hook function), +unless the event is "tail return". +In this case, Lua is only simulating the return, +and a call to getinfo will return invalid data. + +
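A minimal sketch of a line hook that counts executed lines (the counter name is arbitrary): +
+       local count = 0
+       debug.sethook(function (event, line) count = count + 1 end, "l")
+       local x = 1 + 1   -- triggers line events while the hook is active
+       debug.sethook()   -- turns the hook off
+       print("line events seen:", count)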


debug.setlocal (level, local, value)

+ +

This function assigns the value value to the local variable +with index local of the function at level level of the stack. +The function returns nil if there is no local +variable with the given index, +and raises an error when called with a level out of range. +(You can call getinfo to check whether the level is valid.) +Otherwise, it returns the name of the local variable. + +


debug.setmetatable (object, table)

+ +

Sets the metatable for the given object to the given table +(which can be nil). + +


debug.setupvalue (func, up, value)

+ +

This function assigns the value value to the upvalue +with index up of the function func. +The function returns nil if there is no upvalue +with the given index. +Otherwise, it returns the name of the upvalue. + +


debug.traceback ([message])

+ +

Returns a string with a traceback of the call stack. +An optional message string is appended +at the beginning of the traceback. +This function is typically used with xpcall to produce +better error messages. + +

+

6 - Lua Stand-alone

+ +

Although Lua has been designed as an extension language, +to be embedded in a host C program, +it is also frequently used as a stand-alone language. +An interpreter for Lua as a stand-alone language, +called simply lua, +is provided with the standard distribution. +The stand-alone interpreter includes +all standard libraries, including the debug library. +Its usage is: +

+      lua [options] [script [args]]
+
+The options are: -e stat (executes string stat), -l mod ("requires" mod), -i (enters interactive mode after running the script), -v (prints version information), -- (stops handling options), and - (executes stdin as a file and stops handling options). + +After handling its options, lua runs the given script, +passing to it the given args as string arguments. +When called without arguments, +lua behaves as lua -v -i +when the standard input (stdin) is a terminal, +and as lua - otherwise. + +

Before running any argument, +the interpreter checks for an environment variable LUA_INIT. +If its format is @filename, +then lua executes the file. +Otherwise, lua executes the string itself. + +

All options are handled in order, except -i. +For instance, an invocation like +

+       $ lua -e'a=1' -e 'print(a)' script.lua
+
+will first set a to 1, then print the value of a (which is `1´), +and finally run the file script.lua with no arguments. +(Here $ is the shell prompt. Your prompt may be different.) + +

Before starting to run the script, +lua collects all arguments in the command line +in a global table called arg. +The script name is stored at index 0, +the first argument after the script name goes to index 1, +and so on. +Any arguments before the script name +(that is, the interpreter name plus the options) +go to negative indices. +For instance, in the call +

+       $ lua -la b.lua t1 t2
+
+the interpreter first runs the file a.lua, +then creates a table +
+       arg = { [-2] = "lua", [-1] = "-la",
+               [0] = "b.lua",
+               [1] = "t1", [2] = "t2" }
+
+and finally runs the file b.lua. +The script is called with arg[1], arg[2], ... +as arguments; +it can also access these arguments with the vararg expression `...´. + +

In interactive mode, +if you write an incomplete statement, +the interpreter waits for its completion +by issuing a different prompt. + +

If the global variable _PROMPT contains a string, +then its value is used as the prompt. +Similarly, if the global variable _PROMPT2 contains a string, +its value is used as the secondary prompt +(issued during incomplete statements). +Therefore, both prompts can be changed directly on the command line. +For instance, +

+       $ lua -e"_PROMPT='myprompt> '" -i
+
+(the outer pair of quotes is for the shell, +the inner pair is for Lua), +or in any Lua programs by assigning to _PROMPT. +Note the use of -i to enter interactive mode; otherwise, +the program would just end silently right after the assignment to _PROMPT. + +

To allow the use of Lua as a +script interpreter in Unix systems, +the stand-alone interpreter skips +the first line of a chunk if it starts with #. +Therefore, Lua scripts can be made into executable programs +by using chmod +x and the #! form, +as in +

+#!/usr/local/bin/lua
+
+(Of course, +the location of the Lua interpreter may be different in your machine. +If lua is in your PATH, +then +
+#!/usr/bin/env lua
+
+is a more portable solution.) + +


+ +

Incompatibilities with the Previous Version

+ + +

Here we list the incompatibilities that may be found when moving a program +from Lua 5.0 to Lua 5.1. +You can avoid most of the incompatibilities compiling Lua with +appropriate options (see file luaconf.h). +However, +all these compatibility options will be removed in the next version of Lua. + +

Incompatibilities with version 5.0

+ +

Changes in the Language

+ + +

Changes in the Libraries

+ + +

Changes in the API

+ + +

+ +

The Complete Syntax of Lua

+ + +

Here is the complete syntax of Lua in extended BNF. +It does not describe operator priorities or some syntactical restrictions, +such as the rule that return and break statements +can only appear as the last statement of a block. + +

+ +

+
+	chunk ::= {stat [`;´]} [laststat[`;´]]
+
+	block ::= chunk
+
+	stat ::=  varlist1 `=´ explist1  | 
+		 functioncall  | 
+		 do block end  | 
+		 while exp do block end  | 
+		 repeat block until exp  | 
+		 if exp then block {elseif exp then block} [else block] end  | 
+		 for Name `=´ exp `,´ exp [`,´ exp] do block end  | 
+		 for namelist in explist1 do block end  | 
+		 function funcname funcbody  | 
+		 local function Name funcbody  | 
+		 local namelist [`=´ explist1] 
+
+	laststat ::= return [explist1]  |  break
+
+	funcname ::= Name {`.´ Name} [`:´ Name]
+
+	varlist1 ::= var {`,´ var}
+
+	var ::=  Name  |  prefixexp `[´ exp `]´  |  prefixexp `.´ Name 
+
+	namelist ::= Name {`,´ Name}
+
+	explist1 ::= {exp `,´} exp
+
+	exp ::=  nil  |  false  |  true  |  Number  |  String  |  `...´  | 
+		 function  |  prefixexp  |  tableconstructor  |  exp binop exp  |  unop exp 
+
+	prefixexp ::= var  |  functioncall  |  `(´ exp `)´
+
+	functioncall ::=  prefixexp args  |  prefixexp `:´ Name args 
+
+	args ::=  `(´ [explist1] `)´  |  tableconstructor  |  String 
+
+	function ::= function funcbody
+
+	funcbody ::= `(´ [parlist1] `)´ block end
+
+	parlist1 ::= namelist [`,´ `...´]  |  `...´
+
+	tableconstructor ::= `{´ [fieldlist] `}´
+
+	fieldlist ::= field {fieldsep field} [fieldsep]
+
+	field ::= `[´ exp `]´ `=´ exp  |  Name `=´ exp  |  exp
+
+	fieldsep ::= `,´  |  `;´
+
+	binop ::= `+´  |  `-´  |  `*´  |  `/´  |  `^´  |  `%´  |  `..´  | 
+		 `<´  |  `<=´  |  `>´  |  `>=´  |  `==´  |  `~=´  | 
+		 and  |  or
+
+	unop ::= `-´  |  not  |  `#´
+
+
+ +

+ +

+ + + diff --git a/deps/lua/doc/readme.html b/deps/lua/doc/readme.html new file mode 100644 index 0000000000000000000000000000000000000000..db20a69a00ae2fac45a18d7115db3e222846ac6e --- /dev/null +++ b/deps/lua/doc/readme.html @@ -0,0 +1,32 @@ + + +Lua documentation + + + + + +


+

+Lua +Documentation +

+ + + +
+ +Last update: +Wed Sep 7 12:57:50 BRST 2005 + + + + diff --git a/deps/lua/etc/README b/deps/lua/etc/README new file mode 100644 index 0000000000000000000000000000000000000000..ad9ca6ab8731054eb8bf02783d5a38fce9b11ee4 --- /dev/null +++ b/deps/lua/etc/README @@ -0,0 +1,36 @@ +This directory contains some useful files and code. +Unlike the code in ../src, everything here is in the public domain. + +If any of the makes fail, you're probably not using the same libraries +used to build Lua. Set MYLIBS in Makefile accordingly. + +all.c + Full Lua interpreter in a single file. + Do "make one" for a demo. + +lua.hpp + Lua header files for C++ using 'extern "C"'. + +lua.ico + A Lua icon for Windows (and web sites: save as favicon.ico). + Drawn by hand by Markus Gritsch . + +lua.pc + pkg-config data for Lua + +luavs.bat + Script to build Lua under "Visual Studio .NET Command Prompt". + +min.c + A minimal Lua interpreter. + Good for learning and for starting your own. + Do "make min" for a demo. + +noparser.c + Linking with noparser.o avoids loading the parsing modules in lualib.a. + Do "make noparser" for a demo. + +strict.lua + Traps uses of undeclared global variables. + Do "make strict" for a demo. + diff --git a/deps/lua/etc/all.c b/deps/lua/etc/all.c new file mode 100644 index 0000000000000000000000000000000000000000..dab68fac58f8f5a9a8b61bedc2b4a13716b1b18e --- /dev/null +++ b/deps/lua/etc/all.c @@ -0,0 +1,38 @@ +/* +* all.c -- Lua core, libraries and interpreter in a single file +*/ + +#define luaall_c + +#include "lapi.c" +#include "lcode.c" +#include "ldebug.c" +#include "ldo.c" +#include "ldump.c" +#include "lfunc.c" +#include "lgc.c" +#include "llex.c" +#include "lmem.c" +#include "lobject.c" +#include "lopcodes.c" +#include "lparser.c" +#include "lstate.c" +#include "lstring.c" +#include "ltable.c" +#include "ltm.c" +#include "lundump.c" +#include "lvm.c" +#include "lzio.c" + +#include "lauxlib.c" +#include "lbaselib.c" +#include "ldblib.c" +#include "liolib.c" +#include "linit.c" +#include "lmathlib.c" +#include "loadlib.c" +#include "loslib.c" +#include "lstrlib.c" +#include "ltablib.c" + +#include "lua.c" diff --git a/deps/lua/etc/lua.hpp b/deps/lua/etc/lua.hpp new file mode 100644 index 0000000000000000000000000000000000000000..ec417f59469c8672a13a726afe6186cccbe54895 --- /dev/null +++ b/deps/lua/etc/lua.hpp @@ -0,0 +1,9 @@ +// lua.hpp +// Lua header files for C++ +// <> not supplied automatically because Lua also compiles as C++ + +extern "C" { +#include "lua.h" +#include "lualib.h" +#include "lauxlib.h" +} diff --git a/deps/lua/etc/lua.ico b/deps/lua/etc/lua.ico new file mode 100644 index 0000000000000000000000000000000000000000..ccbabc4e2004683f29598a991006d7caff6d837d Binary files /dev/null and b/deps/lua/etc/lua.ico differ diff --git a/deps/lua/etc/lua.pc b/deps/lua/etc/lua.pc new file mode 100644 index 0000000000000000000000000000000000000000..5fd2b1ed1fe06edf380f6c4d8b97255a4bd7cc86 --- /dev/null +++ b/deps/lua/etc/lua.pc @@ -0,0 +1,27 @@ +# lua.pc -- pkg-config data for Lua + +# vars from install Makefile + +# grep '^V=' ../Makefile +V= 5.1 + +# grep '^INSTALL_.*=' ../Makefile | sed 's/INSTALL_TOP/prefix/' +prefix= /usr/local +INSTALL_BIN= ${prefix}/bin +INSTALL_INC= ${prefix}/include +INSTALL_LIB= ${prefix}/lib +INSTALL_MAN= ${prefix}/man/man1 +INSTALL_LMOD= ${prefix}/share/lua/${V} +INSTALL_CMOD= ${prefix}/lib/lua/${V} + +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include + +Name: Lua +Description: An Extensible Extension Language +Version: ${V} 
+Requires: +Libs: -L${libdir} -llua -lm +Cflags: -I${includedir} + diff --git a/deps/lua/etc/luavs.bat b/deps/lua/etc/luavs.bat new file mode 100644 index 0000000000000000000000000000000000000000..eea175e3e0f42b58b968fb5ceead8cc15363eab6 --- /dev/null +++ b/deps/lua/etc/luavs.bat @@ -0,0 +1,7 @@ +cd src +cl /O2 /W3 /c /DLUA_BUILD_AS_DLL l*.c +del lua.obj luac.obj +link /DLL /out:lua51.dll l*.obj +cl /O2 /W3 /c /DLUA_BUILD_AS_DLL lua.c +link /out:lua.exe lua.obj lua51.lib +cd .. diff --git a/deps/lua/etc/min.c b/deps/lua/etc/min.c new file mode 100644 index 0000000000000000000000000000000000000000..404bd503853d346fe0afc268ea578144fe069a38 --- /dev/null +++ b/deps/lua/etc/min.c @@ -0,0 +1,39 @@ +/* +* min.c -- a minimal Lua interpreter +* loads stdin only with minimal error handling. +* no interaction, and no standard library, only a "print" function. +*/ + +#include + +#include "lua.h" +#include "lauxlib.h" + +static int print(lua_State *L) +{ + int n=lua_gettop(L); + int i; + for (i=1; i<=n; i++) + { + if (i>1) printf("\t"); + if (lua_isstring(L,i)) + printf("%s",lua_tostring(L,i)); + else if (lua_isnil(L,i)==2) + printf("%s","nil"); + else if (lua_isboolean(L,i)) + printf("%s",lua_toboolean(L,i) ? "true" : "false"); + else + printf("%s:%p",luaL_typename(L,i),lua_topointer(L,i)); + } + printf("\n"); + return 0; +} + +int main(void) +{ + lua_State *L=lua_open(); + lua_register(L,"print",print); + if (luaL_dofile(L,NULL)!=0) fprintf(stderr,"%s\n",lua_tostring(L,-1)); + lua_close(L); + return 0; +} diff --git a/deps/lua/etc/noparser.c b/deps/lua/etc/noparser.c new file mode 100644 index 0000000000000000000000000000000000000000..13ba5462399acf46594407d6f2c92f59be4f5526 --- /dev/null +++ b/deps/lua/etc/noparser.c @@ -0,0 +1,50 @@ +/* +* The code below can be used to make a Lua core that does not contain the +* parsing modules (lcode, llex, lparser), which represent 35% of the total core. +* You'll only be able to load binary files and strings, precompiled with luac. +* (Of course, you'll have to build luac with the original parsing modules!) +* +* To use this module, simply compile it ("make noparser" does that) and list +* its object file before the Lua libraries. The linker should then not load +* the parsing modules. To try it, do "make luab". +* +* If you also want to avoid the dump module (ldump.o), define NODUMP. +* #define NODUMP +*/ + +#define LUA_CORE + +#include "llex.h" +#include "lparser.h" +#include "lzio.h" + +LUAI_FUNC void luaX_init (lua_State *L) { + UNUSED(L); +} + +LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) { + UNUSED(z); + UNUSED(buff); + UNUSED(name); + lua_pushliteral(L,"parser not loaded"); + lua_error(L); + return NULL; +} + +#ifdef NODUMP +#include "lundump.h" + +LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip) { + UNUSED(f); + UNUSED(w); + UNUSED(data); + UNUSED(strip); +#if 1 + UNUSED(L); + return 0; +#else + lua_pushliteral(L,"dumper not loaded"); + lua_error(L); +#endif +} +#endif diff --git a/deps/lua/etc/strict.lua b/deps/lua/etc/strict.lua new file mode 100644 index 0000000000000000000000000000000000000000..7c9fa159473a1bdf99296f863311417b6b806a11 --- /dev/null +++ b/deps/lua/etc/strict.lua @@ -0,0 +1,34 @@ +-- +-- strict.lua +-- checks uses of undeclared global variables +-- All global variables must be 'declared' through a regular assignment +-- (even assigning nil will do) in a main chunk before being used +-- anywhere or assigned to inside a function. 
+-- + +local mt = getmetatable(_G) +if mt == nil then + mt = {} + setmetatable(_G, mt) +end + +mt.__declared = {} + +mt.__newindex = function (t, n, v) + if not mt.__declared[n] then + local w = debug.getinfo(2, "S").what + if w ~= "main" and w ~= "C" then + error("assign to undeclared variable '"..n.."'", 2) + end + mt.__declared[n] = true + end + rawset(t, n, v) +end + +mt.__index = function (t, n) + if not mt.__declared[n] and debug.getinfo(2, "S").what ~= "C" then + error("variable '"..n.."' is not declared", 2) + end + return rawget(t, n) +end + diff --git a/deps/lua/src/lapi.c b/deps/lua/src/lapi.c new file mode 100644 index 0000000000000000000000000000000000000000..ce7bcf6f3a373b577ef051c9ee329045d37b929c --- /dev/null +++ b/deps/lua/src/lapi.c @@ -0,0 +1,1077 @@ +/* +** $Id: lapi.c,v 2.53 2006/01/10 12:50:00 roberto Exp $ +** Lua API +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include + +#define lapi_c +#define LUA_CORE + +#include "lua.h" + +#include "lapi.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lundump.h" +#include "lvm.h" + + + +const char lua_ident[] = + "$Lua: " LUA_VERSION " " LUA_COPYRIGHT " $\n" + "$Authors: " LUA_AUTHORS " $\n" + "$URL: www.lua.org $\n"; + + + +#define api_checknelems(L, n) api_check(L, (n) <= (L->top - L->base)) + +#define api_checkvalidindex(L, i) api_check(L, (i) != luaO_nilobject) + +#define api_incr_top(L) {api_check(L, L->top < L->ci->top); L->top++;} + + + +static TValue *index2adr (lua_State *L, int idx) { + if (idx > 0) { + TValue *o = L->base + (idx - 1); + api_check(L, idx <= L->ci->top - L->base); + if (o >= L->top) return cast(TValue *, luaO_nilobject); + else return o; + } + else if (idx > LUA_REGISTRYINDEX) { + api_check(L, idx != 0 && -idx <= L->top - L->base); + return L->top + idx; + } + else switch (idx) { /* pseudo-indices */ + case LUA_REGISTRYINDEX: return registry(L); + case LUA_ENVIRONINDEX: { + Closure *func = curr_func(L); + sethvalue(L, &L->env, func->c.env); + return &L->env; + } + case LUA_GLOBALSINDEX: return gt(L); + default: { + Closure *func = curr_func(L); + idx = LUA_GLOBALSINDEX - idx; + return (idx <= func->c.nupvalues) + ? &func->c.upvalue[idx-1] + : cast(TValue *, luaO_nilobject); + } + } +} + + +static Table *getcurrenv (lua_State *L) { + if (L->ci == L->base_ci) /* no enclosing function? 
*/ + return hvalue(gt(L)); /* use global table as environment */ + else { + Closure *func = curr_func(L); + return func->c.env; + } +} + + +void luaA_pushobject (lua_State *L, const TValue *o) { + setobj2s(L, L->top, o); + api_incr_top(L); +} + + +LUA_API int lua_checkstack (lua_State *L, int size) { + int res; + lua_lock(L); + if ((L->top - L->base + size) > LUAI_MAXCSTACK) + res = 0; /* stack overflow */ + else { + luaD_checkstack(L, size); + if (L->ci->top < L->top + size) + L->ci->top = L->top + size; + res = 1; + } + lua_unlock(L); + return res; +} + + +LUA_API void lua_xmove (lua_State *from, lua_State *to, int n) { + int i; + if (from == to) return; + lua_lock(to); + api_checknelems(from, n); + api_check(from, G(from) == G(to)); + api_check(from, to->ci->top - to->top >= n); + from->top -= n; + for (i = 0; i < n; i++) { + setobj2s(to, to->top++, from->top + i); + } + lua_unlock(to); +} + + +LUA_API lua_CFunction lua_atpanic (lua_State *L, lua_CFunction panicf) { + lua_CFunction old; + lua_lock(L); + old = G(L)->panic; + G(L)->panic = panicf; + lua_unlock(L); + return old; +} + + +LUA_API lua_State *lua_newthread (lua_State *L) { + lua_State *L1; + lua_lock(L); + luaC_checkGC(L); + L1 = luaE_newthread(L); + setthvalue(L, L->top, L1); + api_incr_top(L); + lua_unlock(L); + luai_userstatethread(L, L1); + return L1; +} + + + +/* +** basic stack manipulation +*/ + + +LUA_API int lua_gettop (lua_State *L) { + return cast_int(L->top - L->base); +} + + +LUA_API void lua_settop (lua_State *L, int idx) { + lua_lock(L); + if (idx >= 0) { + api_check(L, idx <= L->stack_last - L->base); + while (L->top < L->base + idx) + setnilvalue(L->top++); + L->top = L->base + idx; + } + else { + api_check(L, -(idx+1) <= (L->top - L->base)); + L->top += idx+1; /* `subtract' index (index is negative) */ + } + lua_unlock(L); +} + + +LUA_API void lua_remove (lua_State *L, int idx) { + StkId p; + lua_lock(L); + p = index2adr(L, idx); + api_checkvalidindex(L, p); + while (++p < L->top) setobjs2s(L, p-1, p); + L->top--; + lua_unlock(L); +} + + +LUA_API void lua_insert (lua_State *L, int idx) { + StkId p; + StkId q; + lua_lock(L); + p = index2adr(L, idx); + api_checkvalidindex(L, p); + for (q = L->top; q>p; q--) setobjs2s(L, q, q-1); + setobjs2s(L, p, L->top); + lua_unlock(L); +} + + +LUA_API void lua_replace (lua_State *L, int idx) { + StkId o; + lua_lock(L); + api_checknelems(L, 1); + o = index2adr(L, idx); + api_checkvalidindex(L, o); + if (idx == LUA_ENVIRONINDEX) { + Closure *func = curr_func(L); + api_check(L, ttistable(L->top - 1)); + func->c.env = hvalue(L->top - 1); + luaC_barrier(L, func, L->top - 1); + } + else { + setobj(L, o, L->top - 1); + if (idx < LUA_GLOBALSINDEX) /* function upvalue? */ + luaC_barrier(L, curr_func(L), L->top - 1); + } + L->top--; + lua_unlock(L); +} + + +LUA_API void lua_pushvalue (lua_State *L, int idx) { + lua_lock(L); + setobj2s(L, L->top, index2adr(L, idx)); + api_incr_top(L); + lua_unlock(L); +} + + + +/* +** access functions (stack -> C) +*/ + + +LUA_API int lua_type (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return (o == luaO_nilobject) ? LUA_TNONE : ttype(o); +} + + +LUA_API const char *lua_typename (lua_State *L, int t) { + UNUSED(L); + return (t == LUA_TNONE) ? 
"no value" : luaT_typenames[t]; +} + + +LUA_API int lua_iscfunction (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return iscfunction(o); +} + + +LUA_API int lua_isnumber (lua_State *L, int idx) { + TValue n; + const TValue *o = index2adr(L, idx); + return tonumber(o, &n); +} + + +LUA_API int lua_isstring (lua_State *L, int idx) { + int t = lua_type(L, idx); + return (t == LUA_TSTRING || t == LUA_TNUMBER); +} + + +LUA_API int lua_isuserdata (lua_State *L, int idx) { + const TValue *o = index2adr(L, idx); + return (ttisuserdata(o) || ttislightuserdata(o)); +} + + +LUA_API int lua_rawequal (lua_State *L, int index1, int index2) { + StkId o1 = index2adr(L, index1); + StkId o2 = index2adr(L, index2); + return (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 + : luaO_rawequalObj(o1, o2); +} + + +LUA_API int lua_equal (lua_State *L, int index1, int index2) { + StkId o1, o2; + int i; + lua_lock(L); /* may call tag method */ + o1 = index2adr(L, index1); + o2 = index2adr(L, index2); + i = (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 : equalobj(L, o1, o2); + lua_unlock(L); + return i; +} + + +LUA_API int lua_lessthan (lua_State *L, int index1, int index2) { + StkId o1, o2; + int i; + lua_lock(L); /* may call tag method */ + o1 = index2adr(L, index1); + o2 = index2adr(L, index2); + i = (o1 == luaO_nilobject || o2 == luaO_nilobject) ? 0 + : luaV_lessthan(L, o1, o2); + lua_unlock(L); + return i; +} + + + +LUA_API lua_Number lua_tonumber (lua_State *L, int idx) { + TValue n; + const TValue *o = index2adr(L, idx); + if (tonumber(o, &n)) + return nvalue(o); + else + return 0; +} + + +LUA_API lua_Integer lua_tointeger (lua_State *L, int idx) { + TValue n; + const TValue *o = index2adr(L, idx); + if (tonumber(o, &n)) { + lua_Integer res; + lua_Number num = nvalue(o); + lua_number2integer(res, num); + return res; + } + else + return 0; +} + + +LUA_API int lua_toboolean (lua_State *L, int idx) { + const TValue *o = index2adr(L, idx); + return !l_isfalse(o); +} + + +LUA_API const char *lua_tolstring (lua_State *L, int idx, size_t *len) { + StkId o = index2adr(L, idx); + if (!ttisstring(o)) { + lua_lock(L); /* `luaV_tostring' may create a new string */ + if (!luaV_tostring(L, o)) { /* conversion failed? */ + if (len != NULL) *len = 0; + lua_unlock(L); + return NULL; + } + luaC_checkGC(L); + o = index2adr(L, idx); /* previous call may reallocate the stack */ + lua_unlock(L); + } + if (len != NULL) *len = tsvalue(o)->len; + return svalue(o); +} + + +LUA_API size_t lua_objlen (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + switch (ttype(o)) { + case LUA_TSTRING: return tsvalue(o)->len; + case LUA_TUSERDATA: return uvalue(o)->len; + case LUA_TTABLE: return luaH_getn(hvalue(o)); + case LUA_TNUMBER: { + size_t l; + lua_lock(L); /* `luaV_tostring' may create a new string */ + l = (luaV_tostring(L, o) ? tsvalue(o)->len : 0); + lua_unlock(L); + return l; + } + default: return 0; + } +} + + +LUA_API lua_CFunction lua_tocfunction (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return (!iscfunction(o)) ? NULL : clvalue(o)->c.f; +} + + +LUA_API void *lua_touserdata (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + switch (ttype(o)) { + case LUA_TUSERDATA: return (rawuvalue(o) + 1); + case LUA_TLIGHTUSERDATA: return pvalue(o); + default: return NULL; + } +} + + +LUA_API lua_State *lua_tothread (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + return (!ttisthread(o)) ? 
NULL : thvalue(o); +} + + +LUA_API const void *lua_topointer (lua_State *L, int idx) { + StkId o = index2adr(L, idx); + switch (ttype(o)) { + case LUA_TTABLE: return hvalue(o); + case LUA_TFUNCTION: return clvalue(o); + case LUA_TTHREAD: return thvalue(o); + case LUA_TUSERDATA: + case LUA_TLIGHTUSERDATA: + return lua_touserdata(L, idx); + default: return NULL; + } +} + + + +/* +** push functions (C -> stack) +*/ + + +LUA_API void lua_pushnil (lua_State *L) { + lua_lock(L); + setnilvalue(L->top); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushnumber (lua_State *L, lua_Number n) { + lua_lock(L); + setnvalue(L->top, n); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushinteger (lua_State *L, lua_Integer n) { + lua_lock(L); + setnvalue(L->top, cast_num(n)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushlstring (lua_State *L, const char *s, size_t len) { + lua_lock(L); + luaC_checkGC(L); + setsvalue2s(L, L->top, luaS_newlstr(L, s, len)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushstring (lua_State *L, const char *s) { + if (s == NULL) + lua_pushnil(L); + else + lua_pushlstring(L, s, strlen(s)); +} + + +LUA_API const char *lua_pushvfstring (lua_State *L, const char *fmt, + va_list argp) { + const char *ret; + lua_lock(L); + luaC_checkGC(L); + ret = luaO_pushvfstring(L, fmt, argp); + lua_unlock(L); + return ret; +} + + +LUA_API const char *lua_pushfstring (lua_State *L, const char *fmt, ...) { + const char *ret; + va_list argp; + lua_lock(L); + luaC_checkGC(L); + va_start(argp, fmt); + ret = luaO_pushvfstring(L, fmt, argp); + va_end(argp); + lua_unlock(L); + return ret; +} + + +LUA_API void lua_pushcclosure (lua_State *L, lua_CFunction fn, int n) { + Closure *cl; + lua_lock(L); + luaC_checkGC(L); + api_checknelems(L, n); + cl = luaF_newCclosure(L, n, getcurrenv(L)); + cl->c.f = fn; + L->top -= n; + while (n--) + setobj2n(L, &cl->c.upvalue[n], L->top+n); + setclvalue(L, L->top, cl); + lua_assert(iswhite(obj2gco(cl))); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushboolean (lua_State *L, int b) { + lua_lock(L); + setbvalue(L->top, (b != 0)); /* ensure that true is 1 */ + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_pushlightuserdata (lua_State *L, void *p) { + lua_lock(L); + setpvalue(L->top, p); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API int lua_pushthread (lua_State *L) { + lua_lock(L); + setthvalue(L, L->top, L); + api_incr_top(L); + lua_unlock(L); + return (G(L)->mainthread == L); +} + + + +/* +** get functions (Lua -> stack) +*/ + + +LUA_API void lua_gettable (lua_State *L, int idx) { + StkId t; + lua_lock(L); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + luaV_gettable(L, t, L->top - 1, L->top - 1); + lua_unlock(L); +} + + +LUA_API void lua_getfield (lua_State *L, int idx, const char *k) { + StkId t; + TValue key; + lua_lock(L); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + setsvalue(L, &key, luaS_new(L, k)); + luaV_gettable(L, t, &key, L->top); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_rawget (lua_State *L, int idx) { + StkId t; + lua_lock(L); + t = index2adr(L, idx); + api_check(L, ttistable(t)); + setobj2s(L, L->top - 1, luaH_get(hvalue(t), L->top - 1)); + lua_unlock(L); +} + + +LUA_API void lua_rawgeti (lua_State *L, int idx, int n) { + StkId o; + lua_lock(L); + o = index2adr(L, idx); + api_check(L, ttistable(o)); + setobj2s(L, L->top, luaH_getnum(hvalue(o), n)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API void lua_createtable 
(lua_State *L, int narray, int nrec) { + lua_lock(L); + luaC_checkGC(L); + sethvalue(L, L->top, luaH_new(L, narray, nrec)); + api_incr_top(L); + lua_unlock(L); +} + + +LUA_API int lua_getmetatable (lua_State *L, int objindex) { + const TValue *obj; + Table *mt = NULL; + int res; + lua_lock(L); + obj = index2adr(L, objindex); + switch (ttype(obj)) { + case LUA_TTABLE: + mt = hvalue(obj)->metatable; + break; + case LUA_TUSERDATA: + mt = uvalue(obj)->metatable; + break; + default: + mt = G(L)->mt[ttype(obj)]; + break; + } + if (mt == NULL) + res = 0; + else { + sethvalue(L, L->top, mt); + api_incr_top(L); + res = 1; + } + lua_unlock(L); + return res; +} + + +LUA_API void lua_getfenv (lua_State *L, int idx) { + StkId o; + lua_lock(L); + o = index2adr(L, idx); + api_checkvalidindex(L, o); + switch (ttype(o)) { + case LUA_TFUNCTION: + sethvalue(L, L->top, clvalue(o)->c.env); + break; + case LUA_TUSERDATA: + sethvalue(L, L->top, uvalue(o)->env); + break; + case LUA_TTHREAD: + setobj2s(L, L->top, gt(thvalue(o))); + break; + default: + setnilvalue(L->top); + break; + } + api_incr_top(L); + lua_unlock(L); +} + + +/* +** set functions (stack -> Lua) +*/ + + +LUA_API void lua_settable (lua_State *L, int idx) { + StkId t; + lua_lock(L); + api_checknelems(L, 2); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + luaV_settable(L, t, L->top - 2, L->top - 1); + L->top -= 2; /* pop index and value */ + lua_unlock(L); +} + + +LUA_API void lua_setfield (lua_State *L, int idx, const char *k) { + StkId t; + TValue key; + lua_lock(L); + api_checknelems(L, 1); + t = index2adr(L, idx); + api_checkvalidindex(L, t); + setsvalue(L, &key, luaS_new(L, k)); + luaV_settable(L, t, &key, L->top - 1); + L->top--; /* pop value */ + lua_unlock(L); +} + + +LUA_API void lua_rawset (lua_State *L, int idx) { + StkId t; + lua_lock(L); + api_checknelems(L, 2); + t = index2adr(L, idx); + api_check(L, ttistable(t)); + setobj2t(L, luaH_set(L, hvalue(t), L->top-2), L->top-1); + luaC_barriert(L, hvalue(t), L->top-1); + L->top -= 2; + lua_unlock(L); +} + + +LUA_API void lua_rawseti (lua_State *L, int idx, int n) { + StkId o; + lua_lock(L); + api_checknelems(L, 1); + o = index2adr(L, idx); + api_check(L, ttistable(o)); + setobj2t(L, luaH_setnum(L, hvalue(o), n), L->top-1); + luaC_barriert(L, hvalue(o), L->top-1); + L->top--; + lua_unlock(L); +} + + +LUA_API int lua_setmetatable (lua_State *L, int objindex) { + TValue *obj; + Table *mt; + lua_lock(L); + api_checknelems(L, 1); + obj = index2adr(L, objindex); + api_checkvalidindex(L, obj); + if (ttisnil(L->top - 1)) + mt = NULL; + else { + api_check(L, ttistable(L->top - 1)); + mt = hvalue(L->top - 1); + } + switch (ttype(obj)) { + case LUA_TTABLE: { + hvalue(obj)->metatable = mt; + if (mt) + luaC_objbarriert(L, hvalue(obj), mt); + break; + } + case LUA_TUSERDATA: { + uvalue(obj)->metatable = mt; + if (mt) + luaC_objbarrier(L, rawuvalue(obj), mt); + break; + } + default: { + G(L)->mt[ttype(obj)] = mt; + break; + } + } + L->top--; + lua_unlock(L); + return 1; +} + + +LUA_API int lua_setfenv (lua_State *L, int idx) { + StkId o; + int res = 1; + lua_lock(L); + api_checknelems(L, 1); + o = index2adr(L, idx); + api_checkvalidindex(L, o); + api_check(L, ttistable(L->top - 1)); + switch (ttype(o)) { + case LUA_TFUNCTION: + clvalue(o)->c.env = hvalue(L->top - 1); + break; + case LUA_TUSERDATA: + uvalue(o)->env = hvalue(L->top - 1); + break; + case LUA_TTHREAD: + sethvalue(L, gt(thvalue(o)), hvalue(L->top - 1)); + break; + default: + res = 0; + break; + } + luaC_objbarrier(L, gcvalue(o), 
hvalue(L->top - 1)); + L->top--; + lua_unlock(L); + return res; +} + + +/* +** `load' and `call' functions (run Lua code) +*/ + + +#define adjustresults(L,nres) \ + { if (nres == LUA_MULTRET && L->top >= L->ci->top) L->ci->top = L->top; } + + +#define checkresults(L,na,nr) \ + api_check(L, (nr) == LUA_MULTRET || (L->ci->top - L->top >= (nr) - (na))) + + +LUA_API void lua_call (lua_State *L, int nargs, int nresults) { + StkId func; + lua_lock(L); + api_checknelems(L, nargs+1); + checkresults(L, nargs, nresults); + func = L->top - (nargs+1); + luaD_call(L, func, nresults); + adjustresults(L, nresults); + lua_unlock(L); +} + + + +/* +** Execute a protected call. +*/ +struct CallS { /* data to `f_call' */ + StkId func; + int nresults; +}; + + +static void f_call (lua_State *L, void *ud) { + struct CallS *c = cast(struct CallS *, ud); + luaD_call(L, c->func, c->nresults); +} + + + +LUA_API int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc) { + struct CallS c; + int status; + ptrdiff_t func; + lua_lock(L); + api_checknelems(L, nargs+1); + checkresults(L, nargs, nresults); + if (errfunc == 0) + func = 0; + else { + StkId o = index2adr(L, errfunc); + api_checkvalidindex(L, o); + func = savestack(L, o); + } + c.func = L->top - (nargs+1); /* function to be called */ + c.nresults = nresults; + status = luaD_pcall(L, f_call, &c, savestack(L, c.func), func); + adjustresults(L, nresults); + lua_unlock(L); + return status; +} + + +/* +** Execute a protected C call. +*/ +struct CCallS { /* data to `f_Ccall' */ + lua_CFunction func; + void *ud; +}; + + +static void f_Ccall (lua_State *L, void *ud) { + struct CCallS *c = cast(struct CCallS *, ud); + Closure *cl; + cl = luaF_newCclosure(L, 0, getcurrenv(L)); + cl->c.f = c->func; + setclvalue(L, L->top, cl); /* push function */ + api_incr_top(L); + setpvalue(L->top, c->ud); /* push only argument */ + api_incr_top(L); + luaD_call(L, L->top - 2, 0); +} + + +LUA_API int lua_cpcall (lua_State *L, lua_CFunction func, void *ud) { + struct CCallS c; + int status; + lua_lock(L); + c.func = func; + c.ud = ud; + status = luaD_pcall(L, f_Ccall, &c, savestack(L, L->top), 0); + lua_unlock(L); + return status; +} + + +LUA_API int lua_load (lua_State *L, lua_Reader reader, void *data, + const char *chunkname) { + ZIO z; + int status; + lua_lock(L); + if (!chunkname) chunkname = "?"; + luaZ_init(L, &z, reader, data); + status = luaD_protectedparser(L, &z, chunkname); + lua_unlock(L); + return status; +} + + +LUA_API int lua_dump (lua_State *L, lua_Writer writer, void *data) { + int status; + TValue *o; + lua_lock(L); + api_checknelems(L, 1); + o = L->top - 1; + if (isLfunction(o)) + status = luaU_dump(L, clvalue(o)->l.p, writer, data, 0); + else + status = 1; + lua_unlock(L); + return status; +} + + +LUA_API int lua_status (lua_State *L) { + return L->status; +} + + +/* +** Garbage-collection function +*/ + +LUA_API int lua_gc (lua_State *L, int what, int data) { + int res = 0; + global_State *g; + lua_lock(L); + g = G(L); + switch (what) { + case LUA_GCSTOP: { + g->GCthreshold = MAX_LUMEM; + break; + } + case LUA_GCRESTART: { + g->GCthreshold = g->totalbytes; + break; + } + case LUA_GCCOLLECT: { + luaC_fullgc(L); + break; + } + case LUA_GCCOUNT: { + /* GC values are expressed in Kbytes: #bytes/2^10 */ + res = cast_int(g->totalbytes >> 10); + break; + } + case LUA_GCCOUNTB: { + res = cast_int(g->totalbytes & 0x3ff); + break; + } + case LUA_GCSTEP: { + lu_mem a = (cast(lu_mem, data) << 10); + if (a <= g->totalbytes) + g->GCthreshold = g->totalbytes - a; + else 
+ g->GCthreshold = 0; + while (g->GCthreshold <= g->totalbytes) + luaC_step(L); + if (g->gcstate == GCSpause) /* end of cycle? */ + res = 1; /* signal it */ + break; + } + case LUA_GCSETPAUSE: { + res = g->gcpause; + g->gcpause = data; + break; + } + case LUA_GCSETSTEPMUL: { + res = g->gcstepmul; + g->gcstepmul = data; + break; + } + default: res = -1; /* invalid option */ + } + lua_unlock(L); + return res; +} + + + +/* +** miscellaneous functions +*/ + + +LUA_API int lua_error (lua_State *L) { + lua_lock(L); + api_checknelems(L, 1); + luaG_errormsg(L); + lua_unlock(L); + return 0; /* to avoid warnings */ +} + + +LUA_API int lua_next (lua_State *L, int idx) { + StkId t; + int more; + lua_lock(L); + t = index2adr(L, idx); + api_check(L, ttistable(t)); + more = luaH_next(L, hvalue(t), L->top - 1); + if (more) { + api_incr_top(L); + } + else /* no more elements */ + L->top -= 1; /* remove key */ + lua_unlock(L); + return more; +} + + +LUA_API void lua_concat (lua_State *L, int n) { + lua_lock(L); + api_checknelems(L, n); + if (n >= 2) { + luaC_checkGC(L); + luaV_concat(L, n, cast_int(L->top - L->base) - 1); + L->top -= (n-1); + } + else if (n == 0) { /* push empty string */ + setsvalue2s(L, L->top, luaS_newlstr(L, "", 0)); + api_incr_top(L); + } + /* else n == 1; nothing to do */ + lua_unlock(L); +} + + +LUA_API lua_Alloc lua_getallocf (lua_State *L, void **ud) { + lua_Alloc f; + lua_lock(L); + if (ud) *ud = G(L)->ud; + f = G(L)->frealloc; + lua_unlock(L); + return f; +} + + +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud) { + lua_lock(L); + G(L)->ud = ud; + G(L)->frealloc = f; + lua_unlock(L); +} + + +LUA_API void *lua_newuserdata (lua_State *L, size_t size) { + Udata *u; + lua_lock(L); + luaC_checkGC(L); + u = luaS_newudata(L, size, getcurrenv(L)); + setuvalue(L, L->top, u); + api_incr_top(L); + lua_unlock(L); + return u + 1; +} + + + + +static const char *aux_upvalue (StkId fi, int n, TValue **val) { + Closure *f; + if (!ttisfunction(fi)) return NULL; + f = clvalue(fi); + if (f->c.isC) { + if (!(1 <= n && n <= f->c.nupvalues)) return NULL; + *val = &f->c.upvalue[n-1]; + return ""; + } + else { + Proto *p = f->l.p; + if (!(1 <= n && n <= p->sizeupvalues)) return NULL; + *val = f->l.upvals[n-1]->v; + return getstr(p->upvalues[n-1]); + } +} + + +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n) { + const char *name; + TValue *val; + lua_lock(L); + name = aux_upvalue(index2adr(L, funcindex), n, &val); + if (name) { + setobj2s(L, L->top, val); + api_incr_top(L); + } + lua_unlock(L); + return name; +} + + +LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n) { + const char *name; + TValue *val; + StkId fi; + lua_lock(L); + fi = index2adr(L, funcindex); + api_checknelems(L, 1); + name = aux_upvalue(fi, n, &val); + if (name) { + L->top--; + setobj(L, val, L->top); + luaC_barrier(L, clvalue(fi), L->top); + } + lua_unlock(L); + return name; +} + diff --git a/deps/lua/src/lapi.h b/deps/lua/src/lapi.h new file mode 100644 index 0000000000000000000000000000000000000000..9d1d435649c7431281e56b852364e697f5a75bf8 --- /dev/null +++ b/deps/lua/src/lapi.h @@ -0,0 +1,16 @@ +/* +** $Id: lapi.h,v 2.2 2005/04/25 19:24:10 roberto Exp $ +** Auxiliary functions from Lua API +** See Copyright Notice in lua.h +*/ + +#ifndef lapi_h +#define lapi_h + + +#include "lobject.h" + + +LUAI_FUNC void luaA_pushobject (lua_State *L, const TValue *o); + +#endif diff --git a/deps/lua/src/lauxlib.c b/deps/lua/src/lauxlib.c new file mode 100644 index 
0000000000000000000000000000000000000000..7d00b3a992573c012b2ce2557a0e5a8ddf03a215 --- /dev/null +++ b/deps/lua/src/lauxlib.c @@ -0,0 +1,647 @@ +/* +** $Id: lauxlib.c,v 1.158 2006/01/16 12:42:21 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include +#include +#include + + +/* This file uses only the official API of Lua. +** Any function declared here could be written as an application function. +*/ + +#define lauxlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" + + +#define FREELIST_REF 0 /* free list of references */ + + +/* convert a stack index to positive */ +#define abs_index(L, i) ((i) > 0 || (i) <= LUA_REGISTRYINDEX ? (i) : \ + lua_gettop(L) + (i) + 1) + + +/* +** {====================================================== +** Error-report functions +** ======================================================= +*/ + + +LUALIB_API int luaL_argerror (lua_State *L, int narg, const char *extramsg) { + lua_Debug ar; + if (!lua_getstack(L, 0, &ar)) /* no stack frame? */ + return luaL_error(L, "bad argument #%d (%s)", narg, extramsg); + lua_getinfo(L, "n", &ar); + if (strcmp(ar.namewhat, "method") == 0) { + narg--; /* do not count `self' */ + if (narg == 0) /* error is in the self argument itself? */ + return luaL_error(L, "calling " LUA_QS " on bad self (%s)", + ar.name, extramsg); + } + if (ar.name == NULL) + ar.name = "?"; + return luaL_error(L, "bad argument #%d to " LUA_QS " (%s)", + narg, ar.name, extramsg); +} + + +LUALIB_API int luaL_typerror (lua_State *L, int narg, const char *tname) { + const char *msg = lua_pushfstring(L, "%s expected, got %s", + tname, luaL_typename(L, narg)); + return luaL_argerror(L, narg, msg); +} + + +static void tag_error (lua_State *L, int narg, int tag) { + luaL_typerror(L, narg, lua_typename(L, tag)); +} + + +LUALIB_API void luaL_where (lua_State *L, int level) { + lua_Debug ar; + if (lua_getstack(L, level, &ar)) { /* check function at level */ + lua_getinfo(L, "Sl", &ar); /* get info about it */ + if (ar.currentline > 0) { /* is there info? */ + lua_pushfstring(L, "%s:%d: ", ar.short_src, ar.currentline); + return; + } + } + lua_pushliteral(L, ""); /* else, no information available... */ +} + + +LUALIB_API int luaL_error (lua_State *L, const char *fmt, ...) { + va_list argp; + va_start(argp, fmt); + luaL_where(L, 1); + lua_pushvfstring(L, fmt, argp); + va_end(argp); + lua_concat(L, 2); + return lua_error(L); +} + +/* }====================================================== */ + + +LUALIB_API int luaL_checkoption (lua_State *L, int narg, const char *def, + const char *const lst[]) { + const char *name = (def) ? luaL_optstring(L, narg, def) : + luaL_checkstring(L, narg); + int i; + for (i=0; lst[i]; i++) + if (strcmp(lst[i], name) == 0) + return i; + return luaL_argerror(L, narg, + lua_pushfstring(L, "invalid option " LUA_QS, name)); +} + + +LUALIB_API int luaL_newmetatable (lua_State *L, const char *tname) { + lua_getfield(L, LUA_REGISTRYINDEX, tname); /* get registry.name */ + if (!lua_isnil(L, -1)) /* name already in use? 
*/ + return 0; /* leave previous value on top, but return 0 */ + lua_pop(L, 1); + lua_newtable(L); /* create metatable */ + lua_pushvalue(L, -1); + lua_setfield(L, LUA_REGISTRYINDEX, tname); /* registry.name = metatable */ + return 1; +} + + +LUALIB_API void *luaL_checkudata (lua_State *L, int ud, const char *tname) { + void *p = lua_touserdata(L, ud); + lua_getfield(L, LUA_REGISTRYINDEX, tname); /* get correct metatable */ + if (p == NULL || !lua_getmetatable(L, ud) || !lua_rawequal(L, -1, -2)) + luaL_typerror(L, ud, tname); + lua_pop(L, 2); /* remove both metatables */ + return p; +} + + +LUALIB_API void luaL_checkstack (lua_State *L, int space, const char *mes) { + if (!lua_checkstack(L, space)) + luaL_error(L, "stack overflow (%s)", mes); +} + + +LUALIB_API void luaL_checktype (lua_State *L, int narg, int t) { + if (lua_type(L, narg) != t) + tag_error(L, narg, t); +} + + +LUALIB_API void luaL_checkany (lua_State *L, int narg) { + if (lua_type(L, narg) == LUA_TNONE) + luaL_argerror(L, narg, "value expected"); +} + + +LUALIB_API const char *luaL_checklstring (lua_State *L, int narg, size_t *len) { + const char *s = lua_tolstring(L, narg, len); + if (!s) tag_error(L, narg, LUA_TSTRING); + return s; +} + + +LUALIB_API const char *luaL_optlstring (lua_State *L, int narg, + const char *def, size_t *len) { + if (lua_isnoneornil(L, narg)) { + if (len) + *len = (def ? strlen(def) : 0); + return def; + } + else return luaL_checklstring(L, narg, len); +} + + +LUALIB_API lua_Number luaL_checknumber (lua_State *L, int narg) { + lua_Number d = lua_tonumber(L, narg); + if (d == 0 && !lua_isnumber(L, narg)) /* avoid extra test when d is not 0 */ + tag_error(L, narg, LUA_TNUMBER); + return d; +} + + +LUALIB_API lua_Number luaL_optnumber (lua_State *L, int narg, lua_Number def) { + return luaL_opt(L, luaL_checknumber, narg, def); +} + + +LUALIB_API lua_Integer luaL_checkinteger (lua_State *L, int narg) { + lua_Integer d = lua_tointeger(L, narg); + if (d == 0 && !lua_isnumber(L, narg)) /* avoid extra test when d is not 0 */ + tag_error(L, narg, LUA_TNUMBER); + return d; +} + + +LUALIB_API lua_Integer luaL_optinteger (lua_State *L, int narg, + lua_Integer def) { + return luaL_opt(L, luaL_checkinteger, narg, def); +} + + +LUALIB_API int luaL_getmetafield (lua_State *L, int obj, const char *event) { + if (!lua_getmetatable(L, obj)) /* no metatable? */ + return 0; + lua_pushstring(L, event); + lua_rawget(L, -2); + if (lua_isnil(L, -1)) { + lua_pop(L, 2); /* remove metatable and metafield */ + return 0; + } + else { + lua_remove(L, -2); /* remove only metatable */ + return 1; + } +} + + +LUALIB_API int luaL_callmeta (lua_State *L, int obj, const char *event) { + obj = abs_index(L, obj); + if (!luaL_getmetafield(L, obj, event)) /* no metafield? */ + return 0; + lua_pushvalue(L, obj); + lua_call(L, 1, 1); + return 1; +} + + +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l) { + luaI_openlib(L, libname, l, 0); +} + + +static int libsize (const luaL_Reg *l) { + int size = 0; + for (; l->name; l++) size++; + return size; +} + + +LUALIB_API void luaI_openlib (lua_State *L, const char *libname, + const luaL_Reg *l, int nup) { + if (libname) { + int size = libsize(l); + /* check whether lib already exists */ + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", size); + lua_getfield(L, -1, libname); /* get _LOADED[libname] */ + if (!lua_istable(L, -1)) { /* not found? 
*/ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, libname, size) != NULL) + luaL_error(L, "name conflict for module " LUA_QS, libname); + lua_pushvalue(L, -1); + lua_setfield(L, -3, libname); /* _LOADED[libname] = new table */ + } + lua_remove(L, -2); /* remove _LOADED table */ + lua_insert(L, -(nup+1)); /* move library table to below upvalues */ + } + for (; l->name; l++) { + int i; + for (i=0; ifunc, nup); + lua_setfield(L, -(nup+2), l->name); + } + lua_pop(L, nup); /* remove upvalues */ +} + + + +/* +** {====================================================== +** getn-setn: size for arrays +** ======================================================= +*/ + +#if defined(LUA_COMPAT_GETN) + +static int checkint (lua_State *L, int topop) { + int n = (lua_type(L, -1) == LUA_TNUMBER) ? lua_tointeger(L, -1) : -1; + lua_pop(L, topop); + return n; +} + + +static void getsizes (lua_State *L) { + lua_getfield(L, LUA_REGISTRYINDEX, "LUA_SIZES"); + if (lua_isnil(L, -1)) { /* no `size' table? */ + lua_pop(L, 1); /* remove nil */ + lua_newtable(L); /* create it */ + lua_pushvalue(L, -1); /* `size' will be its own metatable */ + lua_setmetatable(L, -2); + lua_pushliteral(L, "kv"); + lua_setfield(L, -2, "__mode"); /* metatable(N).__mode = "kv" */ + lua_pushvalue(L, -1); + lua_setfield(L, LUA_REGISTRYINDEX, "LUA_SIZES"); /* store in register */ + } +} + + +LUALIB_API void luaL_setn (lua_State *L, int t, int n) { + t = abs_index(L, t); + lua_pushliteral(L, "n"); + lua_rawget(L, t); + if (checkint(L, 1) >= 0) { /* is there a numeric field `n'? */ + lua_pushliteral(L, "n"); /* use it */ + lua_pushinteger(L, n); + lua_rawset(L, t); + } + else { /* use `sizes' */ + getsizes(L); + lua_pushvalue(L, t); + lua_pushinteger(L, n); + lua_rawset(L, -3); /* sizes[t] = n */ + lua_pop(L, 1); /* remove `sizes' */ + } +} + + +LUALIB_API int luaL_getn (lua_State *L, int t) { + int n; + t = abs_index(L, t); + lua_pushliteral(L, "n"); /* try t.n */ + lua_rawget(L, t); + if ((n = checkint(L, 1)) >= 0) return n; + getsizes(L); /* else try sizes[t] */ + lua_pushvalue(L, t); + lua_rawget(L, -2); + if ((n = checkint(L, 2)) >= 0) return n; + return (int)lua_objlen(L, t); +} + +#endif + +/* }====================================================== */ + + + +LUALIB_API const char *luaL_gsub (lua_State *L, const char *s, const char *p, + const char *r) { + const char *wild; + size_t l = strlen(p); + luaL_Buffer b; + luaL_buffinit(L, &b); + while ((wild = strstr(s, p)) != NULL) { + luaL_addlstring(&b, s, wild - s); /* push prefix */ + luaL_addstring(&b, r); /* push replacement in place of pattern */ + s = wild + l; /* continue after `p' */ + } + luaL_addstring(&b, s); /* push last suffix */ + luaL_pushresult(&b); + return lua_tostring(L, -1); +} + + +LUALIB_API const char *luaL_findtable (lua_State *L, int idx, + const char *fname, int szhint) { + const char *e; + lua_pushvalue(L, idx); + do { + e = strchr(fname, '.'); + if (e == NULL) e = fname + strlen(fname); + lua_pushlstring(L, fname, e - fname); + lua_rawget(L, -2); + if (lua_isnil(L, -1)) { /* no such field? */ + lua_pop(L, 1); /* remove this nil */ + lua_createtable(L, 0, (*e == '.' ? 1 : szhint)); /* new table for field */ + lua_pushlstring(L, fname, e - fname); + lua_pushvalue(L, -2); + lua_settable(L, -4); /* set new table into field */ + } + else if (!lua_istable(L, -1)) { /* field has a non-table value? 
*/ + lua_pop(L, 2); /* remove table and value */ + return fname; /* return problematic part of the name */ + } + lua_remove(L, -2); /* remove previous table */ + fname = e + 1; + } while (*e == '.'); + return NULL; +} + + + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + +#define bufflen(B) ((B)->p - (B)->buffer) +#define bufffree(B) ((size_t)(LUAL_BUFFERSIZE - bufflen(B))) + +#define LIMIT (LUA_MINSTACK/2) + + +static int emptybuffer (luaL_Buffer *B) { + size_t l = bufflen(B); + if (l == 0) return 0; /* put nothing on stack */ + else { + lua_pushlstring(B->L, B->buffer, l); + B->p = B->buffer; + B->lvl++; + return 1; + } +} + + +static void adjuststack (luaL_Buffer *B) { + if (B->lvl > 1) { + lua_State *L = B->L; + int toget = 1; /* number of levels to concat */ + size_t toplen = lua_strlen(L, -1); + do { + size_t l = lua_strlen(L, -(toget+1)); + if (B->lvl - toget + 1 >= LIMIT || toplen > l) { + toplen += l; + toget++; + } + else break; + } while (toget < B->lvl); + lua_concat(L, toget); + B->lvl = B->lvl - toget + 1; + } +} + + +LUALIB_API char *luaL_prepbuffer (luaL_Buffer *B) { + if (emptybuffer(B)) + adjuststack(B); + return B->buffer; +} + + +LUALIB_API void luaL_addlstring (luaL_Buffer *B, const char *s, size_t l) { + while (l--) + luaL_addchar(B, *s++); +} + + +LUALIB_API void luaL_addstring (luaL_Buffer *B, const char *s) { + luaL_addlstring(B, s, strlen(s)); +} + + +LUALIB_API void luaL_pushresult (luaL_Buffer *B) { + emptybuffer(B); + lua_concat(B->L, B->lvl); + B->lvl = 1; +} + + +LUALIB_API void luaL_addvalue (luaL_Buffer *B) { + lua_State *L = B->L; + size_t vl; + const char *s = lua_tolstring(L, -1, &vl); + if (vl <= bufffree(B)) { /* fit into buffer? */ + memcpy(B->p, s, vl); /* put it there */ + B->p += vl; + lua_pop(L, 1); /* remove from stack */ + } + else { + if (emptybuffer(B)) + lua_insert(L, -2); /* put buffer before new value */ + B->lvl++; /* add new value into B stack */ + adjuststack(B); + } +} + + +LUALIB_API void luaL_buffinit (lua_State *L, luaL_Buffer *B) { + B->L = L; + B->p = B->buffer; + B->lvl = 0; +} + +/* }====================================================== */ + + +LUALIB_API int luaL_ref (lua_State *L, int t) { + int ref; + t = abs_index(L, t); + if (lua_isnil(L, -1)) { + lua_pop(L, 1); /* remove from stack */ + return LUA_REFNIL; /* `nil' has a unique fixed reference */ + } + lua_rawgeti(L, t, FREELIST_REF); /* get first free element */ + ref = (int)lua_tointeger(L, -1); /* ref = t[FREELIST_REF] */ + lua_pop(L, 1); /* remove it from stack */ + if (ref != 0) { /* any free element? 
*/ + lua_rawgeti(L, t, ref); /* remove it from list */ + lua_rawseti(L, t, FREELIST_REF); /* (t[FREELIST_REF] = t[ref]) */ + } + else { /* no free elements */ + ref = (int)lua_objlen(L, t); + ref++; /* create new reference */ + } + lua_rawseti(L, t, ref); + return ref; +} + + +LUALIB_API void luaL_unref (lua_State *L, int t, int ref) { + if (ref >= 0) { + t = abs_index(L, t); + lua_rawgeti(L, t, FREELIST_REF); + lua_rawseti(L, t, ref); /* t[ref] = t[FREELIST_REF] */ + lua_pushinteger(L, ref); + lua_rawseti(L, t, FREELIST_REF); /* t[FREELIST_REF] = ref */ + } +} + + + +/* +** {====================================================== +** Load functions +** ======================================================= +*/ + +typedef struct LoadF { + int extraline; + FILE *f; + char buff[LUAL_BUFFERSIZE]; +} LoadF; + + +static const char *getF (lua_State *L, void *ud, size_t *size) { + LoadF *lf = (LoadF *)ud; + (void)L; + if (lf->extraline) { + lf->extraline = 0; + *size = 1; + return "\n"; + } + if (feof(lf->f)) return NULL; + *size = fread(lf->buff, 1, LUAL_BUFFERSIZE, lf->f); + return (*size > 0) ? lf->buff : NULL; +} + + +static int errfile (lua_State *L, const char *what, int fnameindex) { + const char *serr = strerror(errno); + const char *filename = lua_tostring(L, fnameindex) + 1; + lua_pushfstring(L, "cannot %s %s: %s", what, filename, serr); + lua_remove(L, fnameindex); + return LUA_ERRFILE; +} + + +LUALIB_API int luaL_loadfile (lua_State *L, const char *filename) { + LoadF lf; + int status, readstatus; + int c; + int fnameindex = lua_gettop(L) + 1; /* index of filename on the stack */ + lf.extraline = 0; + if (filename == NULL) { + lua_pushliteral(L, "=stdin"); + lf.f = stdin; + } + else { + lua_pushfstring(L, "@%s", filename); + lf.f = fopen(filename, "r"); + if (lf.f == NULL) return errfile(L, "open", fnameindex); + } + c = getc(lf.f); + if (c == '#') { /* Unix exec. file? */ + lf.extraline = 1; + while ((c = getc(lf.f)) != EOF && c != '\n') ; /* skip first line */ + if (c == '\n') c = getc(lf.f); + } + if (c == LUA_SIGNATURE[0] && lf.f != stdin) { /* binary file? */ + fclose(lf.f); + lf.f = fopen(filename, "rb"); /* reopen in binary mode */ + if (lf.f == NULL) return errfile(L, "reopen", fnameindex); + /* skip eventual `#!...' 
*/ + while ((c = getc(lf.f)) != EOF && c != LUA_SIGNATURE[0]) ; + lf.extraline = 0; + } + ungetc(c, lf.f); + status = lua_load(L, getF, &lf, lua_tostring(L, -1)); + readstatus = ferror(lf.f); + if (lf.f != stdin) fclose(lf.f); /* close file (even in case of errors) */ + if (readstatus) { + lua_settop(L, fnameindex); /* ignore results from `lua_load' */ + return errfile(L, "read", fnameindex); + } + lua_remove(L, fnameindex); + return status; +} + + +typedef struct LoadS { + const char *s; + size_t size; +} LoadS; + + +static const char *getS (lua_State *L, void *ud, size_t *size) { + LoadS *ls = (LoadS *)ud; + (void)L; + if (ls->size == 0) return NULL; + *size = ls->size; + ls->size = 0; + return ls->s; +} + + +LUALIB_API int luaL_loadbuffer (lua_State *L, const char *buff, size_t size, + const char *name) { + LoadS ls; + ls.s = buff; + ls.size = size; + return lua_load(L, getS, &ls, name); +} + + +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s) { + return luaL_loadbuffer(L, s, strlen(s), s); +} + + + +/* }====================================================== */ + + +static void *l_alloc (void *ud, void *ptr, size_t osize, size_t nsize) { + (void)ud; + (void)osize; + if (nsize == 0) { + free(ptr); + return NULL; + } + else + return realloc(ptr, nsize); +} + + +static int panic (lua_State *L) { + (void)L; /* to avoid warnings */ + fprintf(stderr, "PANIC: unprotected error in call to Lua API (%s)\n", + lua_tostring(L, -1)); + return 0; +} + + +LUALIB_API lua_State *luaL_newstate (void) { + lua_State *L = lua_newstate(l_alloc, NULL); + if (L) lua_atpanic(L, &panic); + return L; +} + diff --git a/deps/lua/src/lauxlib.h b/deps/lua/src/lauxlib.h new file mode 100644 index 0000000000000000000000000000000000000000..1bba1c04f8be7bd77af0399c95fdd8f8d3b290d7 --- /dev/null +++ b/deps/lua/src/lauxlib.h @@ -0,0 +1,172 @@ +/* +** $Id: lauxlib.h,v 1.87 2005/12/29 15:32:11 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lauxlib_h +#define lauxlib_h + + +#include +#include + +#include "lua.h" + + +#if defined(LUA_COMPAT_GETN) +LUALIB_API int (luaL_getn) (lua_State *L, int t); +LUALIB_API void (luaL_setn) (lua_State *L, int t, int n); +#else +#define luaL_getn(L,i) ((int)lua_objlen(L, i)) +#define luaL_setn(L,i,j) ((void)0) /* no op! 
*/ +#endif + +#if defined(LUA_COMPAT_OPENLIB) +#define luaI_openlib luaL_openlib +#endif + + +/* extra error code for `luaL_load' */ +#define LUA_ERRFILE (LUA_ERRERR+1) + + +typedef struct luaL_Reg { + const char *name; + lua_CFunction func; +} luaL_Reg; + + + +LUALIB_API void (luaI_openlib) (lua_State *L, const char *libname, + const luaL_Reg *l, int nup); +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l); +LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname); +LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg); +LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg, + size_t *l); +LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg, + const char *def, size_t *l); +LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg); +LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def); + +LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg); +LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg, + lua_Integer def); + +LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg); +LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t); +LUALIB_API void (luaL_checkany) (lua_State *L, int narg); + +LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname); +LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname); + +LUALIB_API void (luaL_where) (lua_State *L, int lvl); +LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...); + +LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, + const char *const lst[]); + +LUALIB_API int (luaL_ref) (lua_State *L, int t); +LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); + +LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename); +LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz, + const char *name); +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s); + +LUALIB_API lua_State *(luaL_newstate) (void); + + +LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p, + const char *r); + +LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx, + const char *fname, int szhint); + + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define luaL_argcheck(L, cond,numarg,extramsg) \ + ((void)((cond) || luaL_argerror(L, (numarg), (extramsg)))) +#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL)) +#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL)) +#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n))) +#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d))) +#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n))) +#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d))) + +#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i))) + +#define luaL_dofile(L, fn) (luaL_loadfile(L, fn) || lua_pcall(L, 0, 0, 0)) + +#define luaL_dostring(L, s) (luaL_loadstring(L, s) || lua_pcall(L, 0, 0, 0)) + +#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n))) + +#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? 
(d) : f(L,(n))) + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + + +typedef struct luaL_Buffer { + char *p; /* current position in buffer */ + int lvl; /* number of strings in the stack (level) */ + lua_State *L; + char buffer[LUAL_BUFFERSIZE]; +} luaL_Buffer; + +#define luaL_addchar(B,c) \ + ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \ + (*(B)->p++ = (char)(c))) + +/* compatibility only */ +#define luaL_putchar(B,c) luaL_addchar(B,c) + +#define luaL_addsize(B,n) ((B)->p += (n)) + +LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B); +LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B); +LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l); +LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s); +LUALIB_API void (luaL_addvalue) (luaL_Buffer *B); +LUALIB_API void (luaL_pushresult) (luaL_Buffer *B); + + +/* }====================================================== */ + + +/* compatibility with ref system */ + +/* pre-defined references */ +#define LUA_NOREF (-2) +#define LUA_REFNIL (-1) + +#define lua_ref(L,lock) ((lock) ? luaL_ref(L, LUA_REGISTRYINDEX) : \ + (lua_pushstring(L, "unlocked references are obsolete"), lua_error(L), 0)) + +#define lua_unref(L,ref) luaL_unref(L, LUA_REGISTRYINDEX, (ref)) + +#define lua_getref(L,ref) lua_rawgeti(L, LUA_REGISTRYINDEX, (ref)) + + +#define luaL_reg luaL_Reg + +#endif + + diff --git a/deps/lua/src/lbaselib.c b/deps/lua/src/lbaselib.c new file mode 100644 index 0000000000000000000000000000000000000000..1d922a81054034c87704bddcff212401a65374d1 --- /dev/null +++ b/deps/lua/src/lbaselib.c @@ -0,0 +1,643 @@ +/* +** $Id: lbaselib.c,v 1.189 2006/01/18 11:49:12 roberto Exp $ +** Basic library +** See Copyright Notice in lua.h +*/ + + + +#include +#include +#include +#include + +#define lbaselib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + + +/* +** If your system does not support `stdout', you can just remove this function. +** If you need, you can define your own `print' function, following this +** model but changing `fputs' to put the strings at a proper place +** (a console window or a log file, for instance). +*/ +static int luaB_print (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + int i; + lua_getglobal(L, "tostring"); + for (i=1; i<=n; i++) { + const char *s; + lua_pushvalue(L, -1); /* function to be called */ + lua_pushvalue(L, i); /* value to print */ + lua_call(L, 1, 1); + s = lua_tostring(L, -1); /* get result */ + if (s == NULL) + return luaL_error(L, LUA_QL("tostring") " must return a string to " + LUA_QL("print")); + if (i>1) fputs("\t", stdout); + fputs(s, stdout); + lua_pop(L, 1); /* pop result */ + } + fputs("\n", stdout); + return 0; +} + + +static int luaB_tonumber (lua_State *L) { + int base = luaL_optint(L, 2, 10); + if (base == 10) { /* standard conversion */ + luaL_checkany(L, 1); + if (lua_isnumber(L, 1)) { + lua_pushnumber(L, lua_tonumber(L, 1)); + return 1; + } + } + else { + const char *s1 = luaL_checkstring(L, 1); + char *s2; + unsigned long n; + luaL_argcheck(L, 2 <= base && base <= 36, 2, "base out of range"); + n = strtoul(s1, &s2, base); + if (s1 != s2) { /* at least one valid digit? */ + while (isspace((unsigned char)(*s2))) s2++; /* skip trailing spaces */ + if (*s2 == '\0') { /* no invalid trailing characters? 
*/ + lua_pushnumber(L, (lua_Number)n); + return 1; + } + } + } + lua_pushnil(L); /* else not a number */ + return 1; +} + + +static int luaB_error (lua_State *L) { + int level = luaL_optint(L, 2, 1); + lua_settop(L, 1); + if (lua_isstring(L, 1) && level > 0) { /* add extra information? */ + luaL_where(L, level); + lua_pushvalue(L, 1); + lua_concat(L, 2); + } + return lua_error(L); +} + + +static int luaB_getmetatable (lua_State *L) { + luaL_checkany(L, 1); + if (!lua_getmetatable(L, 1)) { + lua_pushnil(L); + return 1; /* no metatable */ + } + luaL_getmetafield(L, 1, "__metatable"); + return 1; /* returns either __metatable field (if present) or metatable */ +} + + +static int luaB_setmetatable (lua_State *L) { + int t = lua_type(L, 2); + luaL_checktype(L, 1, LUA_TTABLE); + luaL_argcheck(L, t == LUA_TNIL || t == LUA_TTABLE, 2, + "nil or table expected"); + if (luaL_getmetafield(L, 1, "__metatable")) + luaL_error(L, "cannot change a protected metatable"); + lua_settop(L, 2); + lua_setmetatable(L, 1); + return 1; +} + + +static void getfunc (lua_State *L) { + if (lua_isfunction(L, 1)) lua_pushvalue(L, 1); + else { + lua_Debug ar; + int level = luaL_optint(L, 1, 1); + luaL_argcheck(L, level >= 0, 1, "level must be non-negative"); + if (lua_getstack(L, level, &ar) == 0) + luaL_argerror(L, 1, "invalid level"); + lua_getinfo(L, "f", &ar); + if (lua_isnil(L, -1)) + luaL_error(L, "no function environment for tail call at level %d", + level); + } +} + + +static int luaB_getfenv (lua_State *L) { + getfunc(L); + if (lua_iscfunction(L, -1)) /* is a C function? */ + lua_pushvalue(L, LUA_GLOBALSINDEX); /* return the thread's global env. */ + else + lua_getfenv(L, -1); + return 1; +} + + +static int luaB_setfenv (lua_State *L) { + luaL_checktype(L, 2, LUA_TTABLE); + getfunc(L); + lua_pushvalue(L, 2); + if (lua_isnumber(L, 1) && lua_tonumber(L, 1) == 0) { + /* change environment of current thread */ + lua_pushthread(L); + lua_insert(L, -2); + lua_setfenv(L, -2); + return 0; + } + else if (lua_iscfunction(L, -2) || lua_setfenv(L, -2) == 0) + luaL_error(L, + LUA_QL("setfenv") " cannot change environment of given object"); + return 1; +} + + +static int luaB_rawequal (lua_State *L) { + luaL_checkany(L, 1); + luaL_checkany(L, 2); + lua_pushboolean(L, lua_rawequal(L, 1, 2)); + return 1; +} + + +static int luaB_rawget (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + luaL_checkany(L, 2); + lua_settop(L, 2); + lua_rawget(L, 1); + return 1; +} + +static int luaB_rawset (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + luaL_checkany(L, 2); + luaL_checkany(L, 3); + lua_settop(L, 3); + lua_rawset(L, 1); + return 1; +} + + +static int luaB_gcinfo (lua_State *L) { + lua_pushinteger(L, lua_getgccount(L)); + return 1; +} + + +static int luaB_collectgarbage (lua_State *L) { + static const char *const opts[] = {"stop", "restart", "collect", + "count", "step", "setpause", "setstepmul", NULL}; + static const int optsnum[] = {LUA_GCSTOP, LUA_GCRESTART, LUA_GCCOLLECT, + LUA_GCCOUNT, LUA_GCSTEP, LUA_GCSETPAUSE, LUA_GCSETSTEPMUL}; + int o = luaL_checkoption(L, 1, "collect", opts); + int ex = luaL_optint(L, 2, 0); + int res = lua_gc(L, optsnum[o], ex); + switch (optsnum[o]) { + case LUA_GCCOUNT: { + int b = lua_gc(L, LUA_GCCOUNTB, 0); + lua_pushnumber(L, res + ((lua_Number)b/1024)); + return 1; + } + case LUA_GCSTEP: { + lua_pushboolean(L, res); + return 1; + } + default: { + lua_pushnumber(L, res); + return 1; + } + } +} + + +static int luaB_type (lua_State *L) { + luaL_checkany(L, 1); + lua_pushstring(L, 
luaL_typename(L, 1)); + return 1; +} + + +static int luaB_next (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + lua_settop(L, 2); /* create a 2nd argument if there isn't one */ + if (lua_next(L, 1)) + return 2; + else { + lua_pushnil(L); + return 1; + } +} + + +static int luaB_pairs (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + lua_pushvalue(L, lua_upvalueindex(1)); /* return generator, */ + lua_pushvalue(L, 1); /* state, */ + lua_pushnil(L); /* and initial value */ + return 3; +} + + +static int ipairsaux (lua_State *L) { + int i = luaL_checkint(L, 2); + luaL_checktype(L, 1, LUA_TTABLE); + i++; /* next value */ + lua_pushinteger(L, i); + lua_rawgeti(L, 1, i); + return (lua_isnil(L, -1)) ? 0 : 2; +} + + +static int luaB_ipairs (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + lua_pushvalue(L, lua_upvalueindex(1)); /* return generator, */ + lua_pushvalue(L, 1); /* state, */ + lua_pushinteger(L, 0); /* and initial value */ + return 3; +} + + +static int load_aux (lua_State *L, int status) { + if (status == 0) /* OK? */ + return 1; + else { + lua_pushnil(L); + lua_insert(L, -2); /* put before error message */ + return 2; /* return nil plus error message */ + } +} + + +static int luaB_loadstring (lua_State *L) { + size_t l; + const char *s = luaL_checklstring(L, 1, &l); + const char *chunkname = luaL_optstring(L, 2, s); + return load_aux(L, luaL_loadbuffer(L, s, l, chunkname)); +} + + +static int luaB_loadfile (lua_State *L) { + const char *fname = luaL_optstring(L, 1, NULL); + return load_aux(L, luaL_loadfile(L, fname)); +} + + +/* +** Reader for generic `load' function: `lua_load' uses the +** stack for internal stuff, so the reader cannot change the +** stack top. Instead, it keeps its resulting string in a +** reserved slot inside the stack. 
+*/ +static const char *generic_reader (lua_State *L, void *ud, size_t *size) { + (void)ud; /* to avoid warnings */ + luaL_checkstack(L, 2, "too many nested functions"); + lua_pushvalue(L, 1); /* get function */ + lua_call(L, 0, 1); /* call it */ + if (lua_isnil(L, -1)) { + *size = 0; + return NULL; + } + else if (lua_isstring(L, -1)) { + lua_replace(L, 3); /* save string in a reserved stack slot */ + return lua_tolstring(L, 3, size); + } + else luaL_error(L, "reader function must return a string"); + return NULL; /* to avoid warnings */ +} + + +static int luaB_load (lua_State *L) { + int status; + const char *cname = luaL_optstring(L, 2, "=(load)"); + luaL_checktype(L, 1, LUA_TFUNCTION); + lua_settop(L, 3); /* function, eventual name, plus one reserved slot */ + status = lua_load(L, generic_reader, NULL, cname); + return load_aux(L, status); +} + + +static int luaB_dofile (lua_State *L) { + const char *fname = luaL_optstring(L, 1, NULL); + int n = lua_gettop(L); + if (luaL_loadfile(L, fname) != 0) lua_error(L); + lua_call(L, 0, LUA_MULTRET); + return lua_gettop(L) - n; +} + + +static int luaB_assert (lua_State *L) { + luaL_checkany(L, 1); + if (!lua_toboolean(L, 1)) + return luaL_error(L, "%s", luaL_optstring(L, 2, "assertion failed!")); + return lua_gettop(L); +} + + +static int luaB_unpack (lua_State *L) { + int i, e, n; + luaL_checktype(L, 1, LUA_TTABLE); + i = luaL_optint(L, 2, 1); + e = luaL_opt(L, luaL_checkint, 3, luaL_getn(L, 1)); + n = e - i + 1; /* number of elements */ + if (n <= 0) return 0; /* empty range */ + luaL_checkstack(L, n, "table too big to unpack"); + for (; i<=e; i++) /* push arg[i...e] */ + lua_rawgeti(L, 1, i); + return n; +} + + +static int luaB_select (lua_State *L) { + int n = lua_gettop(L); + if (lua_type(L, 1) == LUA_TSTRING && *lua_tostring(L, 1) == '#') { + lua_pushinteger(L, n-1); + return 1; + } + else { + int i = luaL_checkint(L, 1); + if (i < 0) i = n + i; + else if (i > n) i = n; + luaL_argcheck(L, 1 <= i, 1, "index out of range"); + return n - i; + } +} + + +static int luaB_pcall (lua_State *L) { + int status; + luaL_checkany(L, 1); + status = lua_pcall(L, lua_gettop(L) - 1, LUA_MULTRET, 0); + lua_pushboolean(L, (status == 0)); + lua_insert(L, 1); + return lua_gettop(L); /* return status + all results */ +} + + +static int luaB_xpcall (lua_State *L) { + int status; + luaL_checkany(L, 2); + lua_settop(L, 2); + lua_insert(L, 1); /* put error function under function to be called */ + status = lua_pcall(L, 0, LUA_MULTRET, 1); + lua_pushboolean(L, (status == 0)); + lua_replace(L, 1); + return lua_gettop(L); /* return status + all results */ +} + + +static int luaB_tostring (lua_State *L) { + luaL_checkany(L, 1); + if (luaL_callmeta(L, 1, "__tostring")) /* is there a metafield? */ + return 1; /* use its value */ + switch (lua_type(L, 1)) { + case LUA_TNUMBER: + lua_pushstring(L, lua_tostring(L, 1)); + break; + case LUA_TSTRING: + lua_pushvalue(L, 1); + break; + case LUA_TBOOLEAN: + lua_pushstring(L, (lua_toboolean(L, 1) ? "true" : "false")); + break; + case LUA_TNIL: + lua_pushliteral(L, "nil"); + break; + default: + lua_pushfstring(L, "%s: %p", luaL_typename(L, 1), lua_topointer(L, 1)); + break; + } + return 1; +} + + +static int luaB_newproxy (lua_State *L) { + lua_settop(L, 1); + lua_newuserdata(L, 0); /* create proxy */ + if (lua_toboolean(L, 1) == 0) + return 1; /* no metatable */ + else if (lua_isboolean(L, 1)) { + lua_newtable(L); /* create a new metatable `m' ... */ + lua_pushvalue(L, -1); /* ... 
and mark `m' as a valid metatable */ + lua_pushboolean(L, 1); + lua_rawset(L, lua_upvalueindex(1)); /* weaktable[m] = true */ + } + else { + int validproxy = 0; /* to check if weaktable[metatable(u)] == true */ + if (lua_getmetatable(L, 1)) { + lua_rawget(L, lua_upvalueindex(1)); + validproxy = lua_toboolean(L, -1); + lua_pop(L, 1); /* remove value */ + } + luaL_argcheck(L, validproxy, 1, "boolean or proxy expected"); + lua_getmetatable(L, 1); /* metatable is valid; get it */ + } + lua_setmetatable(L, 2); + return 1; +} + + +static const luaL_Reg base_funcs[] = { + {"assert", luaB_assert}, + {"collectgarbage", luaB_collectgarbage}, + {"dofile", luaB_dofile}, + {"error", luaB_error}, + {"gcinfo", luaB_gcinfo}, + {"getfenv", luaB_getfenv}, + {"getmetatable", luaB_getmetatable}, + {"loadfile", luaB_loadfile}, + {"load", luaB_load}, + {"loadstring", luaB_loadstring}, + {"next", luaB_next}, + {"pcall", luaB_pcall}, + {"print", luaB_print}, + {"rawequal", luaB_rawequal}, + {"rawget", luaB_rawget}, + {"rawset", luaB_rawset}, + {"select", luaB_select}, + {"setfenv", luaB_setfenv}, + {"setmetatable", luaB_setmetatable}, + {"tonumber", luaB_tonumber}, + {"tostring", luaB_tostring}, + {"type", luaB_type}, + {"unpack", luaB_unpack}, + {"xpcall", luaB_xpcall}, + {NULL, NULL} +}; + + +/* +** {====================================================== +** Coroutine library +** ======================================================= +*/ + +static int auxresume (lua_State *L, lua_State *co, int narg) { + int status; + if (!lua_checkstack(co, narg)) + luaL_error(L, "too many arguments to resume"); + if (lua_status(co) == 0 && lua_gettop(co) == 0) { + lua_pushliteral(L, "cannot resume dead coroutine"); + return -1; /* error flag */ + } + lua_xmove(L, co, narg); + status = lua_resume(co, narg); + if (status == 0 || status == LUA_YIELD) { + int nres = lua_gettop(co); + if (!lua_checkstack(L, nres)) + luaL_error(L, "too many results to resume"); + lua_xmove(co, L, nres); /* move yielded values */ + return nres; + } + else { + lua_xmove(co, L, 1); /* move error message */ + return -1; /* error flag */ + } +} + + +static int luaB_coresume (lua_State *L) { + lua_State *co = lua_tothread(L, 1); + int r; + luaL_argcheck(L, co, 1, "coroutine expected"); + r = auxresume(L, co, lua_gettop(L) - 1); + if (r < 0) { + lua_pushboolean(L, 0); + lua_insert(L, -2); + return 2; /* return false + error message */ + } + else { + lua_pushboolean(L, 1); + lua_insert(L, -(r + 1)); + return r + 1; /* return true + `resume' returns */ + } +} + + +static int luaB_auxwrap (lua_State *L) { + lua_State *co = lua_tothread(L, lua_upvalueindex(1)); + int r = auxresume(L, co, lua_gettop(L)); + if (r < 0) { + if (lua_isstring(L, -1)) { /* error object is a string? 
*/ + luaL_where(L, 1); /* add extra info */ + lua_insert(L, -2); + lua_concat(L, 2); + } + lua_error(L); /* propagate error */ + } + return r; +} + + +static int luaB_cocreate (lua_State *L) { + lua_State *NL = lua_newthread(L); + luaL_argcheck(L, lua_isfunction(L, 1) && !lua_iscfunction(L, 1), 1, + "Lua function expected"); + lua_pushvalue(L, 1); /* move function to top */ + lua_xmove(L, NL, 1); /* move function from L to NL */ + return 1; +} + + +static int luaB_cowrap (lua_State *L) { + luaB_cocreate(L); + lua_pushcclosure(L, luaB_auxwrap, 1); + return 1; +} + + +static int luaB_yield (lua_State *L) { + return lua_yield(L, lua_gettop(L)); +} + + +static int luaB_costatus (lua_State *L) { + lua_State *co = lua_tothread(L, 1); + luaL_argcheck(L, co, 1, "coroutine expected"); + if (L == co) lua_pushliteral(L, "running"); + else { + switch (lua_status(co)) { + case LUA_YIELD: + lua_pushliteral(L, "suspended"); + break; + case 0: { + lua_Debug ar; + if (lua_getstack(co, 0, &ar) > 0) /* does it have frames? */ + lua_pushliteral(L, "normal"); /* it is running */ + else if (lua_gettop(co) == 0) + lua_pushliteral(L, "dead"); + else + lua_pushliteral(L, "suspended"); /* initial state */ + break; + } + default: /* some error occurred */ + lua_pushliteral(L, "dead"); + break; + } + } + return 1; +} + + +static int luaB_corunning (lua_State *L) { + if (lua_pushthread(L)) + return 0; /* main thread is not a coroutine */ + else + return 1; +} + + +static const luaL_Reg co_funcs[] = { + {"create", luaB_cocreate}, + {"resume", luaB_coresume}, + {"running", luaB_corunning}, + {"status", luaB_costatus}, + {"wrap", luaB_cowrap}, + {"yield", luaB_yield}, + {NULL, NULL} +}; + +/* }====================================================== */ + + +static void auxopen (lua_State *L, const char *name, + lua_CFunction f, lua_CFunction u) { + lua_pushcfunction(L, u); + lua_pushcclosure(L, f, 1); + lua_setfield(L, -2, name); +} + + +static void base_open (lua_State *L) { + /* set global _G */ + lua_pushvalue(L, LUA_GLOBALSINDEX); + lua_setglobal(L, "_G"); + /* open lib into global table */ + luaL_register(L, "_G", base_funcs); + lua_pushliteral(L, LUA_VERSION); + lua_setglobal(L, "_VERSION"); /* set global _VERSION */ + /* `ipairs' and `pairs' need auxiliary functions as upvalues */ + auxopen(L, "ipairs", luaB_ipairs, ipairsaux); + auxopen(L, "pairs", luaB_pairs, luaB_next); + /* `newproxy' needs a weaktable as upvalue */ + lua_createtable(L, 0, 1); /* new table `w' */ + lua_pushvalue(L, -1); /* `w' will be its own metatable */ + lua_setmetatable(L, -2); + lua_pushliteral(L, "kv"); + lua_setfield(L, -2, "__mode"); /* metatable(w).__mode = "kv" */ + lua_pushcclosure(L, luaB_newproxy, 1); + lua_setglobal(L, "newproxy"); /* set global `newproxy' */ +} + + +LUALIB_API int luaopen_base (lua_State *L) { + base_open(L); + luaL_register(L, LUA_COLIBNAME, co_funcs); + return 2; +} + diff --git a/deps/lua/src/lcode.c b/deps/lua/src/lcode.c new file mode 100644 index 0000000000000000000000000000000000000000..dd3e37e78cad0914dd0f6149d8d5a1d8e054425c --- /dev/null +++ b/deps/lua/src/lcode.c @@ -0,0 +1,825 @@ +/* +** $Id: lcode.c,v 2.24 2005/12/22 16:19:56 roberto Exp $ +** Code generator for Lua +** See Copyright Notice in lua.h +*/ + + +#include <stdlib.h> + +#define lcode_c +#define LUA_CORE + +#include "lua.h" + +#include "lcode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lgc.h" +#include "llex.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" +#include "ltable.h" + + +#define hasjumps(e) 
((e)->t != (e)->f) + + +static int isnumeral(expdesc *e) { + return (e->k == VKNUM && e->t == NO_JUMP && e->f == NO_JUMP); +} + + +void luaK_nil (FuncState *fs, int from, int n) { + Instruction *previous; + if (fs->pc > fs->lasttarget) { /* no jumps to current position? */ + if (fs->pc == 0) /* function start? */ + return; /* positions are already clean */ + if (GET_OPCODE(*(previous = &fs->f->code[fs->pc-1])) == OP_LOADNIL) { + int pfrom = GETARG_A(*previous); + int pto = GETARG_B(*previous); + if (pfrom <= from && from <= pto+1) { /* can connect both? */ + if (from+n-1 > pto) + SETARG_B(*previous, from+n-1); + return; + } + } + } + luaK_codeABC(fs, OP_LOADNIL, from, from+n-1, 0); /* else no optimization */ +} + + +int luaK_jump (FuncState *fs) { + int jpc = fs->jpc; /* save list of jumps to here */ + int j; + fs->jpc = NO_JUMP; + j = luaK_codeAsBx(fs, OP_JMP, 0, NO_JUMP); + luaK_concat(fs, &j, jpc); /* keep them on hold */ + return j; +} + + +void luaK_ret (FuncState *fs, int first, int nret) { + luaK_codeABC(fs, OP_RETURN, first, nret+1, 0); +} + + +static int condjump (FuncState *fs, OpCode op, int A, int B, int C) { + luaK_codeABC(fs, op, A, B, C); + return luaK_jump(fs); +} + + +static void fixjump (FuncState *fs, int pc, int dest) { + Instruction *jmp = &fs->f->code[pc]; + int offset = dest-(pc+1); + lua_assert(dest != NO_JUMP); + if (abs(offset) > MAXARG_sBx) + luaX_syntaxerror(fs->ls, "control structure too long"); + SETARG_sBx(*jmp, offset); +} + + +/* +** returns current `pc' and marks it as a jump target (to avoid wrong +** optimizations with consecutive instructions not in the same basic block). +*/ +int luaK_getlabel (FuncState *fs) { + fs->lasttarget = fs->pc; + return fs->pc; +} + + +static int getjump (FuncState *fs, int pc) { + int offset = GETARG_sBx(fs->f->code[pc]); + if (offset == NO_JUMP) /* point to itself represents end of list */ + return NO_JUMP; /* end of list */ + else + return (pc+1)+offset; /* turn offset into absolute position */ +} + + +static Instruction *getjumpcontrol (FuncState *fs, int pc) { + Instruction *pi = &fs->f->code[pc]; + if (pc >= 1 && testTMode(GET_OPCODE(*(pi-1)))) + return pi-1; + else + return pi; +} + + +/* +** check whether list has any jump that do not produce a value +** (or produce an inverted value) +*/ +static int need_value (FuncState *fs, int list) { + for (; list != NO_JUMP; list = getjump(fs, list)) { + Instruction i = *getjumpcontrol(fs, list); + if (GET_OPCODE(i) != OP_TESTSET) return 1; + } + return 0; /* not found */ +} + + +static int patchtestreg (FuncState *fs, int node, int reg) { + Instruction *i = getjumpcontrol(fs, node); + if (GET_OPCODE(*i) != OP_TESTSET) + return 0; /* cannot patch other instructions */ + if (reg != NO_REG && reg != GETARG_B(*i)) + SETARG_A(*i, reg); + else /* no register to put value or register already has the value */ + *i = CREATE_ABC(OP_TEST, GETARG_B(*i), 0, GETARG_C(*i)); + + return 1; +} + + +static void removevalues (FuncState *fs, int list) { + for (; list != NO_JUMP; list = getjump(fs, list)) + patchtestreg(fs, list, NO_REG); +} + + +static void patchlistaux (FuncState *fs, int list, int vtarget, int reg, + int dtarget) { + while (list != NO_JUMP) { + int next = getjump(fs, list); + if (patchtestreg(fs, list, reg)) + fixjump(fs, list, vtarget); + else + fixjump(fs, list, dtarget); /* jump to default target */ + list = next; + } +} + + +static void dischargejpc (FuncState *fs) { + patchlistaux(fs, fs->jpc, fs->pc, NO_REG, fs->pc); + fs->jpc = NO_JUMP; +} + + +void luaK_patchlist 
(FuncState *fs, int list, int target) { + if (target == fs->pc) + luaK_patchtohere(fs, list); + else { + lua_assert(target < fs->pc); + patchlistaux(fs, list, target, NO_REG, target); + } +} + + +void luaK_patchtohere (FuncState *fs, int list) { + luaK_getlabel(fs); + luaK_concat(fs, &fs->jpc, list); +} + + +void luaK_concat (FuncState *fs, int *l1, int l2) { + if (l2 == NO_JUMP) return; + else if (*l1 == NO_JUMP) + *l1 = l2; + else { + int list = *l1; + int next; + while ((next = getjump(fs, list)) != NO_JUMP) /* find last element */ + list = next; + fixjump(fs, list, l2); + } +} + + +void luaK_checkstack (FuncState *fs, int n) { + int newstack = fs->freereg + n; + if (newstack > fs->f->maxstacksize) { + if (newstack >= MAXSTACK) + luaX_syntaxerror(fs->ls, "function or expression too complex"); + fs->f->maxstacksize = cast_byte(newstack); + } +} + + +void luaK_reserveregs (FuncState *fs, int n) { + luaK_checkstack(fs, n); + fs->freereg += n; +} + + +static void freereg (FuncState *fs, int reg) { + if (!ISK(reg) && reg >= fs->nactvar) { + fs->freereg--; + lua_assert(reg == fs->freereg); + } +} + + +static void freeexp (FuncState *fs, expdesc *e) { + if (e->k == VNONRELOC) + freereg(fs, e->u.s.info); +} + + +static int addk (FuncState *fs, TValue *k, TValue *v) { + lua_State *L = fs->L; + TValue *idx = luaH_set(L, fs->h, k); + Proto *f = fs->f; + int oldsize = f->sizek; + if (ttisnumber(idx)) { + lua_assert(luaO_rawequalObj(&fs->f->k[cast_int(nvalue(idx))], v)); + return cast_int(nvalue(idx)); + } + else { /* constant not found; create a new entry */ + setnvalue(idx, cast_num(fs->nk)); + luaM_growvector(L, f->k, fs->nk, f->sizek, TValue, + MAXARG_Bx, "constant table overflow"); + while (oldsize < f->sizek) setnilvalue(&f->k[oldsize++]); + setobj(L, &f->k[fs->nk], v); + luaC_barrier(L, f, v); + return fs->nk++; + } +} + + +int luaK_stringK (FuncState *fs, TString *s) { + TValue o; + setsvalue(fs->L, &o, s); + return addk(fs, &o, &o); +} + + +int luaK_numberK (FuncState *fs, lua_Number r) { + TValue o; + setnvalue(&o, r); + return addk(fs, &o, &o); +} + + +static int boolK (FuncState *fs, int b) { + TValue o; + setbvalue(&o, b); + return addk(fs, &o, &o); +} + + +static int nilK (FuncState *fs) { + TValue k, v; + setnilvalue(&v); + /* cannot use nil as key; instead use table itself to represent nil */ + sethvalue(fs->L, &k, fs->h); + return addk(fs, &k, &v); +} + + +void luaK_setreturns (FuncState *fs, expdesc *e, int nresults) { + if (e->k == VCALL) { /* expression is an open function call? */ + SETARG_C(getcode(fs, e), nresults+1); + } + else if (e->k == VVARARG) { + SETARG_B(getcode(fs, e), nresults+1); + SETARG_A(getcode(fs, e), fs->freereg); + luaK_reserveregs(fs, 1); + } +} + + +void luaK_setoneret (FuncState *fs, expdesc *e) { + if (e->k == VCALL) { /* expression is an open function call? 
*/ + e->k = VNONRELOC; + e->u.s.info = GETARG_A(getcode(fs, e)); + } + else if (e->k == VVARARG) { + SETARG_B(getcode(fs, e), 2); + e->k = VRELOCABLE; /* can relocate its simple result */ + } +} + + +void luaK_dischargevars (FuncState *fs, expdesc *e) { + switch (e->k) { + case VLOCAL: { + e->k = VNONRELOC; + break; + } + case VUPVAL: { + e->u.s.info = luaK_codeABC(fs, OP_GETUPVAL, 0, e->u.s.info, 0); + e->k = VRELOCABLE; + break; + } + case VGLOBAL: { + e->u.s.info = luaK_codeABx(fs, OP_GETGLOBAL, 0, e->u.s.info); + e->k = VRELOCABLE; + break; + } + case VINDEXED: { + freereg(fs, e->u.s.aux); + freereg(fs, e->u.s.info); + e->u.s.info = luaK_codeABC(fs, OP_GETTABLE, 0, e->u.s.info, e->u.s.aux); + e->k = VRELOCABLE; + break; + } + case VVARARG: + case VCALL: { + luaK_setoneret(fs, e); + break; + } + default: break; /* there is one value available (somewhere) */ + } +} + + +static int code_label (FuncState *fs, int A, int b, int jump) { + luaK_getlabel(fs); /* those instructions may be jump targets */ + return luaK_codeABC(fs, OP_LOADBOOL, A, b, jump); +} + + +static void discharge2reg (FuncState *fs, expdesc *e, int reg) { + luaK_dischargevars(fs, e); + switch (e->k) { + case VNIL: { + luaK_nil(fs, reg, 1); + break; + } + case VFALSE: case VTRUE: { + luaK_codeABC(fs, OP_LOADBOOL, reg, e->k == VTRUE, 0); + break; + } + case VK: { + luaK_codeABx(fs, OP_LOADK, reg, e->u.s.info); + break; + } + case VKNUM: { + luaK_codeABx(fs, OP_LOADK, reg, luaK_numberK(fs, e->u.nval)); + break; + } + case VRELOCABLE: { + Instruction *pc = &getcode(fs, e); + SETARG_A(*pc, reg); + break; + } + case VNONRELOC: { + if (reg != e->u.s.info) + luaK_codeABC(fs, OP_MOVE, reg, e->u.s.info, 0); + break; + } + default: { + lua_assert(e->k == VVOID || e->k == VJMP); + return; /* nothing to do... */ + } + } + e->u.s.info = reg; + e->k = VNONRELOC; +} + + +static void discharge2anyreg (FuncState *fs, expdesc *e) { + if (e->k != VNONRELOC) { + luaK_reserveregs(fs, 1); + discharge2reg(fs, e, fs->freereg-1); + } +} + + +static void exp2reg (FuncState *fs, expdesc *e, int reg) { + discharge2reg(fs, e, reg); + if (e->k == VJMP) + luaK_concat(fs, &e->t, e->u.s.info); /* put this jump in `t' list */ + if (hasjumps(e)) { + int final; /* position after whole expression */ + int p_f = NO_JUMP; /* position of an eventual LOAD false */ + int p_t = NO_JUMP; /* position of an eventual LOAD true */ + if (need_value(fs, e->t) || need_value(fs, e->f)) { + int fj = (e->k == VJMP) ? NO_JUMP : luaK_jump(fs); + p_f = code_label(fs, reg, 0, 1); + p_t = code_label(fs, reg, 1, 0); + luaK_patchtohere(fs, fj); + } + final = luaK_getlabel(fs); + patchlistaux(fs, e->f, final, reg, p_f); + patchlistaux(fs, e->t, final, reg, p_t); + } + e->f = e->t = NO_JUMP; + e->u.s.info = reg; + e->k = VNONRELOC; +} + + +void luaK_exp2nextreg (FuncState *fs, expdesc *e) { + luaK_dischargevars(fs, e); + freeexp(fs, e); + luaK_reserveregs(fs, 1); + exp2reg(fs, e, fs->freereg - 1); +} + + +int luaK_exp2anyreg (FuncState *fs, expdesc *e) { + luaK_dischargevars(fs, e); + if (e->k == VNONRELOC) { + if (!hasjumps(e)) return e->u.s.info; /* exp is already in a register */ + if (e->u.s.info >= fs->nactvar) { /* reg. is not a local? 
*/ + exp2reg(fs, e, e->u.s.info); /* put value on it */ + return e->u.s.info; + } + } + luaK_exp2nextreg(fs, e); /* default */ + return e->u.s.info; +} + + +void luaK_exp2val (FuncState *fs, expdesc *e) { + if (hasjumps(e)) + luaK_exp2anyreg(fs, e); + else + luaK_dischargevars(fs, e); +} + + +int luaK_exp2RK (FuncState *fs, expdesc *e) { + luaK_exp2val(fs, e); + switch (e->k) { + case VKNUM: + case VTRUE: + case VFALSE: + case VNIL: { + if (fs->nk <= MAXINDEXRK) { /* constant fit in RK operand? */ + e->u.s.info = (e->k == VNIL) ? nilK(fs) : + (e->k == VKNUM) ? luaK_numberK(fs, e->u.nval) : + boolK(fs, (e->k == VTRUE)); + e->k = VK; + return RKASK(e->u.s.info); + } + else break; + } + case VK: { + if (e->u.s.info <= MAXINDEXRK) /* constant fit in argC? */ + return RKASK(e->u.s.info); + else break; + } + default: break; + } + /* not a constant in the right range: put it in a register */ + return luaK_exp2anyreg(fs, e); +} + + +void luaK_storevar (FuncState *fs, expdesc *var, expdesc *ex) { + switch (var->k) { + case VLOCAL: { + freeexp(fs, ex); + exp2reg(fs, ex, var->u.s.info); + return; + } + case VUPVAL: { + int e = luaK_exp2anyreg(fs, ex); + luaK_codeABC(fs, OP_SETUPVAL, e, var->u.s.info, 0); + break; + } + case VGLOBAL: { + int e = luaK_exp2anyreg(fs, ex); + luaK_codeABx(fs, OP_SETGLOBAL, e, var->u.s.info); + break; + } + case VINDEXED: { + int e = luaK_exp2RK(fs, ex); + luaK_codeABC(fs, OP_SETTABLE, var->u.s.info, var->u.s.aux, e); + break; + } + default: { + lua_assert(0); /* invalid var kind to store */ + break; + } + } + freeexp(fs, ex); +} + + +void luaK_self (FuncState *fs, expdesc *e, expdesc *key) { + int func; + luaK_exp2anyreg(fs, e); + freeexp(fs, e); + func = fs->freereg; + luaK_reserveregs(fs, 2); + luaK_codeABC(fs, OP_SELF, func, e->u.s.info, luaK_exp2RK(fs, key)); + freeexp(fs, key); + e->u.s.info = func; + e->k = VNONRELOC; +} + + +static void invertjump (FuncState *fs, expdesc *e) { + Instruction *pc = getjumpcontrol(fs, e->u.s.info); + lua_assert(testTMode(GET_OPCODE(*pc)) && GET_OPCODE(*pc) != OP_TESTSET && + GET_OPCODE(*pc) != OP_TEST); + SETARG_A(*pc, !(GETARG_A(*pc))); +} + + +static int jumponcond (FuncState *fs, expdesc *e, int cond) { + if (e->k == VRELOCABLE) { + Instruction ie = getcode(fs, e); + if (GET_OPCODE(ie) == OP_NOT) { + fs->pc--; /* remove previous OP_NOT */ + return condjump(fs, OP_TEST, GETARG_B(ie), 0, !cond); + } + /* else go through */ + } + discharge2anyreg(fs, e); + freeexp(fs, e); + return condjump(fs, OP_TESTSET, NO_REG, e->u.s.info, cond); +} + + +void luaK_goiftrue (FuncState *fs, expdesc *e) { + int pc; /* pc of last jump */ + luaK_dischargevars(fs, e); + switch (e->k) { + case VK: case VKNUM: case VTRUE: { + pc = NO_JUMP; /* always true; do nothing */ + break; + } + case VFALSE: { + pc = luaK_jump(fs); /* always jump */ + break; + } + case VJMP: { + invertjump(fs, e); + pc = e->u.s.info; + break; + } + default: { + pc = jumponcond(fs, e, 0); + break; + } + } + luaK_concat(fs, &e->f, pc); /* insert last jump in `f' list */ + luaK_patchtohere(fs, e->t); + e->t = NO_JUMP; +} + + +static void luaK_goiffalse (FuncState *fs, expdesc *e) { + int pc; /* pc of last jump */ + luaK_dischargevars(fs, e); + switch (e->k) { + case VNIL: case VFALSE: { + pc = NO_JUMP; /* always false; do nothing */ + break; + } + case VTRUE: { + pc = luaK_jump(fs); /* always jump */ + break; + } + case VJMP: { + pc = e->u.s.info; + break; + } + default: { + pc = jumponcond(fs, e, 1); + break; + } + } + luaK_concat(fs, &e->t, pc); /* insert last jump in `t' list */ + 
luaK_patchtohere(fs, e->f); + e->f = NO_JUMP; +} + + +static void codenot (FuncState *fs, expdesc *e) { + luaK_dischargevars(fs, e); + switch (e->k) { + case VNIL: case VFALSE: { + e->k = VTRUE; + break; + } + case VK: case VKNUM: case VTRUE: { + e->k = VFALSE; + break; + } + case VJMP: { + invertjump(fs, e); + break; + } + case VRELOCABLE: + case VNONRELOC: { + discharge2anyreg(fs, e); + freeexp(fs, e); + e->u.s.info = luaK_codeABC(fs, OP_NOT, 0, e->u.s.info, 0); + e->k = VRELOCABLE; + break; + } + default: { + lua_assert(0); /* cannot happen */ + break; + } + } + /* interchange true and false lists */ + { int temp = e->f; e->f = e->t; e->t = temp; } + removevalues(fs, e->f); + removevalues(fs, e->t); +} + + +void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k) { + t->u.s.aux = luaK_exp2RK(fs, k); + t->k = VINDEXED; +} + + +static int constfolding (OpCode op, expdesc *e1, expdesc *e2) { + lua_Number v1, v2, r; + if (!isnumeral(e1) || !isnumeral(e2)) return 0; + v1 = e1->u.nval; + v2 = e2->u.nval; + switch (op) { + case OP_ADD: r = luai_numadd(v1, v2); break; + case OP_SUB: r = luai_numsub(v1, v2); break; + case OP_MUL: r = luai_nummul(v1, v2); break; + case OP_DIV: + if (v2 == 0) return 0; /* do not attempt to divide by 0 */ + r = luai_numdiv(v1, v2); break; + case OP_MOD: + if (v2 == 0) return 0; /* do not attempt to divide by 0 */ + r = luai_nummod(v1, v2); break; + case OP_POW: r = luai_numpow(v1, v2); break; + case OP_UNM: r = luai_numunm(v1); break; + case OP_LEN: return 0; /* no constant folding for 'len' */ + default: lua_assert(0); r = 0; break; + } + if (luai_numisnan(r)) return 0; /* do not attempt to produce NaN */ + e1->u.nval = r; + return 1; +} + + +static void codearith (FuncState *fs, OpCode op, expdesc *e1, expdesc *e2) { + if (constfolding(op, e1, e2)) + return; + else { + int o1 = luaK_exp2RK(fs, e1); + int o2 = (op != OP_UNM && op != OP_LEN) ? 
luaK_exp2RK(fs, e2) : 0; + freeexp(fs, e2); + freeexp(fs, e1); + e1->u.s.info = luaK_codeABC(fs, op, 0, o1, o2); + e1->k = VRELOCABLE; + } +} + + +static void codecomp (FuncState *fs, OpCode op, int cond, expdesc *e1, + expdesc *e2) { + int o1 = luaK_exp2RK(fs, e1); + int o2 = luaK_exp2RK(fs, e2); + freeexp(fs, e2); + freeexp(fs, e1); + if (cond == 0 && op != OP_EQ) { + int temp; /* exchange args to replace by `<' or `<=' */ + temp = o1; o1 = o2; o2 = temp; /* o1 <==> o2 */ + cond = 1; + } + e1->u.s.info = condjump(fs, op, cond, o1, o2); + e1->k = VJMP; +} + + +void luaK_prefix (FuncState *fs, UnOpr op, expdesc *e) { + expdesc e2; + e2.t = e2.f = NO_JUMP; e2.k = VKNUM; e2.u.nval = 0; + switch (op) { + case OPR_MINUS: { + if (e->k == VK) + luaK_exp2anyreg(fs, e); /* cannot operate on non-numeric constants */ + codearith(fs, OP_UNM, e, &e2); + break; + } + case OPR_NOT: codenot(fs, e); break; + case OPR_LEN: { + luaK_exp2anyreg(fs, e); /* cannot operate on constants */ + codearith(fs, OP_LEN, e, &e2); + break; + } + default: lua_assert(0); + } +} + + +void luaK_infix (FuncState *fs, BinOpr op, expdesc *v) { + switch (op) { + case OPR_AND: { + luaK_goiftrue(fs, v); + break; + } + case OPR_OR: { + luaK_goiffalse(fs, v); + break; + } + case OPR_CONCAT: { + luaK_exp2nextreg(fs, v); /* operand must be on the `stack' */ + break; + } + default: { + if (!isnumeral(v)) luaK_exp2RK(fs, v); + break; + } + } +} + + +void luaK_posfix (FuncState *fs, BinOpr op, expdesc *e1, expdesc *e2) { + switch (op) { + case OPR_AND: { + lua_assert(e1->t == NO_JUMP); /* list must be closed */ + luaK_dischargevars(fs, e2); + luaK_concat(fs, &e1->f, e2->f); + e1->k = e2->k; e1->u.s.info = e2->u.s.info; + e1->u.s.aux = e2->u.s.aux; e1->t = e2->t; + break; + } + case OPR_OR: { + lua_assert(e1->f == NO_JUMP); /* list must be closed */ + luaK_dischargevars(fs, e2); + luaK_concat(fs, &e1->t, e2->t); + e1->k = e2->k; e1->u.s.info = e2->u.s.info; + e1->u.s.aux = e2->u.s.aux; e1->f = e2->f; + break; + } + case OPR_CONCAT: { + luaK_exp2val(fs, e2); + if (e2->k == VRELOCABLE && GET_OPCODE(getcode(fs, e2)) == OP_CONCAT) { + lua_assert(e1->u.s.info == GETARG_B(getcode(fs, e2))-1); + freeexp(fs, e1); + SETARG_B(getcode(fs, e2), e1->u.s.info); + e1->k = e2->k; e1->u.s.info = e2->u.s.info; + } + else { + luaK_exp2nextreg(fs, e2); /* operand must be on the 'stack' */ + codearith(fs, OP_CONCAT, e1, e2); + } + break; + } + case OPR_ADD: codearith(fs, OP_ADD, e1, e2); break; + case OPR_SUB: codearith(fs, OP_SUB, e1, e2); break; + case OPR_MUL: codearith(fs, OP_MUL, e1, e2); break; + case OPR_DIV: codearith(fs, OP_DIV, e1, e2); break; + case OPR_MOD: codearith(fs, OP_MOD, e1, e2); break; + case OPR_POW: codearith(fs, OP_POW, e1, e2); break; + case OPR_EQ: codecomp(fs, OP_EQ, 1, e1, e2); break; + case OPR_NE: codecomp(fs, OP_EQ, 0, e1, e2); break; + case OPR_LT: codecomp(fs, OP_LT, 1, e1, e2); break; + case OPR_LE: codecomp(fs, OP_LE, 1, e1, e2); break; + case OPR_GT: codecomp(fs, OP_LT, 0, e1, e2); break; + case OPR_GE: codecomp(fs, OP_LE, 0, e1, e2); break; + default: lua_assert(0); + } +} + + +void luaK_fixline (FuncState *fs, int line) { + fs->f->lineinfo[fs->pc - 1] = line; +} + + +static int luaK_code (FuncState *fs, Instruction i, int line) { + Proto *f = fs->f; + dischargejpc(fs); /* `pc' will change */ + /* put new instruction in code array */ + luaM_growvector(fs->L, f->code, fs->pc, f->sizecode, Instruction, + MAX_INT, "code size overflow"); + f->code[fs->pc] = i; + /* save corresponding line information */ + 
luaM_growvector(fs->L, f->lineinfo, fs->pc, f->sizelineinfo, int, + MAX_INT, "code size overflow"); + f->lineinfo[fs->pc] = line; + return fs->pc++; +} + + +int luaK_codeABC (FuncState *fs, OpCode o, int a, int b, int c) { + lua_assert(getOpMode(o) == iABC); + lua_assert(getBMode(o) != OpArgN || b == 0); + lua_assert(getCMode(o) != OpArgN || c == 0); + return luaK_code(fs, CREATE_ABC(o, a, b, c), fs->ls->lastline); +} + + +int luaK_codeABx (FuncState *fs, OpCode o, int a, unsigned int bc) { + lua_assert(getOpMode(o) == iABx || getOpMode(o) == iAsBx); + lua_assert(getCMode(o) == OpArgN); + return luaK_code(fs, CREATE_ABx(o, a, bc), fs->ls->lastline); +} + + +void luaK_setlist (FuncState *fs, int base, int nelems, int tostore) { + int c = (nelems - 1)/LFIELDS_PER_FLUSH + 1; + int b = (tostore == LUA_MULTRET) ? 0 : tostore; + lua_assert(tostore != 0); + if (c <= MAXARG_C) + luaK_codeABC(fs, OP_SETLIST, base, b, c); + else { + luaK_codeABC(fs, OP_SETLIST, base, b, 0); + luaK_code(fs, cast(Instruction, c), fs->ls->lastline); + } + fs->freereg = base + 1; /* free registers with list values */ +} + diff --git a/deps/lua/src/lcode.h b/deps/lua/src/lcode.h new file mode 100644 index 0000000000000000000000000000000000000000..b5668f22a61efe1f696506b049e5a988bbe5ce00 --- /dev/null +++ b/deps/lua/src/lcode.h @@ -0,0 +1,77 @@ +/* +** $Id: lcode.h,v 1.47 2005/11/08 19:44:31 roberto Exp $ +** Code generator for Lua +** See Copyright Notice in lua.h +*/ + +#ifndef lcode_h +#define lcode_h + +#include "llex.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" + + +/* +** Marks the end of a patch list. It is an invalid value both as an absolute +** address, and as a list link (would link an element to itself). +*/ +#define NO_JUMP (-1) + + +/* +** grep "ORDER OPR" if you change these enums +*/ +typedef enum BinOpr { + OPR_ADD, OPR_SUB, OPR_MUL, OPR_DIV, OPR_MOD, OPR_POW, + OPR_CONCAT, + OPR_NE, OPR_EQ, + OPR_LT, OPR_LE, OPR_GT, OPR_GE, + OPR_AND, OPR_OR, + OPR_NOBINOPR +} BinOpr; + +#define binopistest(op) ((op) >= OPR_NE) + +typedef enum UnOpr { OPR_MINUS, OPR_NOT, OPR_LEN, OPR_NOUNOPR } UnOpr; + + +#define getcode(fs,e) ((fs)->f->code[(e)->u.s.info]) + +#define luaK_codeAsBx(fs,o,A,sBx) luaK_codeABx(fs,o,A,(sBx)+MAXARG_sBx) + +#define luaK_setmultret(fs,e) luaK_setreturns(fs, e, LUA_MULTRET) + +LUAI_FUNC int luaK_codeABx (FuncState *fs, OpCode o, int A, unsigned int Bx); +LUAI_FUNC int luaK_codeABC (FuncState *fs, OpCode o, int A, int B, int C); +LUAI_FUNC void luaK_fixline (FuncState *fs, int line); +LUAI_FUNC void luaK_nil (FuncState *fs, int from, int n); +LUAI_FUNC void luaK_reserveregs (FuncState *fs, int n); +LUAI_FUNC void luaK_checkstack (FuncState *fs, int n); +LUAI_FUNC int luaK_stringK (FuncState *fs, TString *s); +LUAI_FUNC int luaK_numberK (FuncState *fs, lua_Number r); +LUAI_FUNC void luaK_dischargevars (FuncState *fs, expdesc *e); +LUAI_FUNC int luaK_exp2anyreg (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_exp2nextreg (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_exp2val (FuncState *fs, expdesc *e); +LUAI_FUNC int luaK_exp2RK (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_self (FuncState *fs, expdesc *e, expdesc *key); +LUAI_FUNC void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k); +LUAI_FUNC void luaK_goiftrue (FuncState *fs, expdesc *e); +LUAI_FUNC void luaK_storevar (FuncState *fs, expdesc *var, expdesc *e); +LUAI_FUNC void luaK_setreturns (FuncState *fs, expdesc *e, int nresults); +LUAI_FUNC void luaK_setoneret (FuncState *fs, expdesc *e); +LUAI_FUNC int 
luaK_jump (FuncState *fs); +LUAI_FUNC void luaK_ret (FuncState *fs, int first, int nret); +LUAI_FUNC void luaK_patchlist (FuncState *fs, int list, int target); +LUAI_FUNC void luaK_patchtohere (FuncState *fs, int list); +LUAI_FUNC void luaK_concat (FuncState *fs, int *l1, int l2); +LUAI_FUNC int luaK_getlabel (FuncState *fs); +LUAI_FUNC void luaK_prefix (FuncState *fs, UnOpr op, expdesc *v); +LUAI_FUNC void luaK_infix (FuncState *fs, BinOpr op, expdesc *v); +LUAI_FUNC void luaK_posfix (FuncState *fs, BinOpr op, expdesc *v1, expdesc *v2); +LUAI_FUNC void luaK_setlist (FuncState *fs, int base, int nelems, int tostore); + + +#endif diff --git a/deps/lua/src/ldblib.c b/deps/lua/src/ldblib.c new file mode 100644 index 0000000000000000000000000000000000000000..26a19b6a6f6f04d9ee74dc49c32608feadbfca38 --- /dev/null +++ b/deps/lua/src/ldblib.c @@ -0,0 +1,397 @@ +/* +** $Id: ldblib.c,v 1.104 2005/12/29 15:32:11 roberto Exp $ +** Interface from Lua to its debug API +** See Copyright Notice in lua.h +*/ + + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define ldblib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + +static int db_getregistry (lua_State *L) { + lua_pushvalue(L, LUA_REGISTRYINDEX); + return 1; +} + + +static int db_getmetatable (lua_State *L) { + luaL_checkany(L, 1); + if (!lua_getmetatable(L, 1)) { + lua_pushnil(L); /* no metatable */ + } + return 1; +} + + +static int db_setmetatable (lua_State *L) { + int t = lua_type(L, 2); + luaL_argcheck(L, t == LUA_TNIL || t == LUA_TTABLE, 2, + "nil or table expected"); + lua_settop(L, 2); + lua_pushboolean(L, lua_setmetatable(L, 1)); + return 1; +} + + +static int db_getfenv (lua_State *L) { + lua_getfenv(L, 1); + return 1; +} + + +static int db_setfenv (lua_State *L) { + luaL_checktype(L, 2, LUA_TTABLE); + lua_settop(L, 2); + if (lua_setfenv(L, 1) == 0) + luaL_error(L, LUA_QL("setfenv") + " cannot change environment of given object"); + return 1; +} + + +static void settabss (lua_State *L, const char *i, const char *v) { + lua_pushstring(L, v); + lua_setfield(L, -2, i); +} + + +static void settabsi (lua_State *L, const char *i, int v) { + lua_pushinteger(L, v); + lua_setfield(L, -2, i); +} + + +static lua_State *getthread (lua_State *L, int *arg) { + if (lua_isthread(L, 1)) { + *arg = 1; + return lua_tothread(L, 1); + } + else { + *arg = 0; + return L; + } +} + + +static void treatstackoption (lua_State *L, lua_State *L1, const char *fname) { + if (L == L1) { + lua_pushvalue(L, -2); + lua_remove(L, -3); + } + else + lua_xmove(L1, L, 1); + lua_setfield(L, -2, fname); +} + + +static int db_getinfo (lua_State *L) { + lua_Debug ar; + int arg; + lua_State *L1 = getthread(L, &arg); + const char *options = luaL_optstring(L, arg+2, "flnSu"); + if (lua_isnumber(L, arg+1)) { + if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), &ar)) { + lua_pushnil(L); /* level out of range */ + return 1; + } + } + else if (lua_isfunction(L, arg+1)) { + lua_pushfstring(L, ">%s", options); + options = lua_tostring(L, -1); + lua_pushvalue(L, arg+1); + lua_xmove(L, L1, 1); + } + else + return luaL_argerror(L, arg+1, "function or level expected"); + if (!lua_getinfo(L1, options, &ar)) + return luaL_argerror(L, arg+2, "invalid option"); + lua_createtable(L, 0, 2); + if (strchr(options, 'S')) { + settabss(L, "source", ar.source); + settabss(L, "short_src", ar.short_src); + settabsi(L, "linedefined", ar.linedefined); + settabsi(L, "lastlinedefined", ar.lastlinedefined); + settabss(L, "what", ar.what); + } + if (strchr(options, 'l')) + settabsi(L, 
"currentline", ar.currentline); + if (strchr(options, 'u')) + settabsi(L, "nups", ar.nups); + if (strchr(options, 'n')) { + settabss(L, "name", ar.name); + settabss(L, "namewhat", ar.namewhat); + } + if (strchr(options, 'L')) + treatstackoption(L, L1, "activelines"); + if (strchr(options, 'f')) + treatstackoption(L, L1, "func"); + return 1; /* return table */ +} + + +static int db_getlocal (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + const char *name; + if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar)) /* out of range? */ + return luaL_argerror(L, arg+1, "level out of range"); + name = lua_getlocal(L1, &ar, luaL_checkint(L, arg+2)); + if (name) { + lua_xmove(L1, L, 1); + lua_pushstring(L, name); + lua_pushvalue(L, -2); + return 2; + } + else { + lua_pushnil(L); + return 1; + } +} + + +static int db_setlocal (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar)) /* out of range? */ + return luaL_argerror(L, arg+1, "level out of range"); + luaL_checkany(L, arg+3); + lua_settop(L, arg+3); + lua_xmove(L, L1, 1); + lua_pushstring(L, lua_setlocal(L1, &ar, luaL_checkint(L, arg+2))); + return 1; +} + + +static int auxupvalue (lua_State *L, int get) { + const char *name; + int n = luaL_checkint(L, 2); + luaL_checktype(L, 1, LUA_TFUNCTION); + if (lua_iscfunction(L, 1)) return 0; /* cannot touch C upvalues from Lua */ + name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n); + if (name == NULL) return 0; + lua_pushstring(L, name); + lua_insert(L, -(get+1)); + return get + 1; +} + + +static int db_getupvalue (lua_State *L) { + return auxupvalue(L, 1); +} + + +static int db_setupvalue (lua_State *L) { + luaL_checkany(L, 3); + return auxupvalue(L, 0); +} + + + +static const char KEY_HOOK = 'h'; + + +static void hookf (lua_State *L, lua_Debug *ar) { + static const char *const hooknames[] = + {"call", "return", "line", "count", "tail return"}; + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); + lua_pushlightuserdata(L, L); + lua_rawget(L, -2); + if (lua_isfunction(L, -1)) { + lua_pushstring(L, hooknames[(int)ar->event]); + if (ar->currentline >= 0) + lua_pushinteger(L, ar->currentline); + else lua_pushnil(L); + lua_assert(lua_getinfo(L, "lS", ar)); + lua_call(L, 2, 0); + } +} + + +static int makemask (const char *smask, int count) { + int mask = 0; + if (strchr(smask, 'c')) mask |= LUA_MASKCALL; + if (strchr(smask, 'r')) mask |= LUA_MASKRET; + if (strchr(smask, 'l')) mask |= LUA_MASKLINE; + if (count > 0) mask |= LUA_MASKCOUNT; + return mask; +} + + +static char *unmakemask (int mask, char *smask) { + int i = 0; + if (mask & LUA_MASKCALL) smask[i++] = 'c'; + if (mask & LUA_MASKRET) smask[i++] = 'r'; + if (mask & LUA_MASKLINE) smask[i++] = 'l'; + smask[i] = '\0'; + return smask; +} + + +static void gethooktable (lua_State *L) { + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_rawget(L, LUA_REGISTRYINDEX); + if (!lua_istable(L, -1)) { + lua_pop(L, 1); + lua_createtable(L, 0, 1); + lua_pushlightuserdata(L, (void *)&KEY_HOOK); + lua_pushvalue(L, -2); + lua_rawset(L, LUA_REGISTRYINDEX); + } +} + + +static int db_sethook (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + if (lua_isnoneornil(L, arg+1)) { + lua_settop(L, arg+1); + lua_sethook(L1, NULL, 0, 0); /* turn off hooks */ + } + else { + const char *smask = luaL_checkstring(L, arg+2); + int count = luaL_optint(L, arg+3, 0); + luaL_checktype(L, arg+1, LUA_TFUNCTION); + 
lua_sethook(L1, hookf, makemask(smask, count), count); + } + gethooktable(L1); + lua_pushlightuserdata(L1, L1); + lua_pushvalue(L, arg+1); + lua_xmove(L, L1, 1); + lua_rawset(L1, -3); /* set new hook */ + lua_pop(L1, 1); /* remove hook table */ + return 0; +} + + +static int db_gethook (lua_State *L) { + int arg; + lua_State *L1 = getthread(L, &arg); + char buff[5]; + int mask = lua_gethookmask(L1); + lua_Hook hook = lua_gethook(L1); + if (hook != NULL && hook != hookf) /* external hook? */ + lua_pushliteral(L, "external hook"); + else { + gethooktable(L1); + lua_pushlightuserdata(L1, L1); + lua_rawget(L1, -2); /* get hook */ + lua_remove(L1, -2); /* remove hook table */ + lua_xmove(L1, L, 1); + } + lua_pushstring(L, unmakemask(mask, buff)); + lua_pushinteger(L, lua_gethookcount(L1)); + return 3; +} + + +static int db_debug (lua_State *L) { + for (;;) { + char buffer[250]; + fputs("lua_debug> ", stderr); + if (fgets(buffer, sizeof(buffer), stdin) == 0 || + strcmp(buffer, "cont\n") == 0) + return 0; + if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") || + lua_pcall(L, 0, 0, 0)) { + fputs(lua_tostring(L, -1), stderr); + fputs("\n", stderr); + } + lua_settop(L, 0); /* remove eventual returns */ + } +} + + +#define LEVELS1 12 /* size of the first part of the stack */ +#define LEVELS2 10 /* size of the second part of the stack */ + +static int db_errorfb (lua_State *L) { + int level; + int firstpart = 1; /* still before eventual `...' */ + int arg; + lua_State *L1 = getthread(L, &arg); + lua_Debug ar; + if (lua_isnumber(L, arg+2)) { + level = (int)lua_tointeger(L, arg+2); + lua_pop(L, 1); + } + else + level = (L == L1) ? 1 : 0; /* level 0 may be this own function */ + if (lua_gettop(L) == arg) + lua_pushliteral(L, ""); + else if (!lua_isstring(L, arg+1)) return 1; /* message is not a string */ + else lua_pushliteral(L, "\n"); + lua_pushliteral(L, "stack traceback:"); + while (lua_getstack(L1, level++, &ar)) { + if (level > LEVELS1 && firstpart) { + /* no more than `LEVELS2' more levels? */ + if (!lua_getstack(L1, level+LEVELS2, &ar)) + level--; /* keep going */ + else { + lua_pushliteral(L, "\n\t..."); /* too many levels */ + while (lua_getstack(L1, level+LEVELS2, &ar)) /* find last levels */ + level++; + } + firstpart = 0; + continue; + } + lua_pushliteral(L, "\n\t"); + lua_getinfo(L1, "Snl", &ar); + lua_pushfstring(L, "%s:", ar.short_src); + if (ar.currentline > 0) + lua_pushfstring(L, "%d:", ar.currentline); + if (*ar.namewhat != '\0') /* is there a name? */ + lua_pushfstring(L, " in function " LUA_QS, ar.name); + else { + if (*ar.what == 'm') /* main? 
*/ + lua_pushfstring(L, " in main chunk"); + else if (*ar.what == 'C' || *ar.what == 't') + lua_pushliteral(L, " ?"); /* C function or tail call */ + else + lua_pushfstring(L, " in function <%s:%d>", + ar.short_src, ar.linedefined); + } + lua_concat(L, lua_gettop(L) - arg); + } + lua_concat(L, lua_gettop(L) - arg); + return 1; +} + + +static const luaL_Reg dblib[] = { + {"debug", db_debug}, + {"getfenv", db_getfenv}, + {"gethook", db_gethook}, + {"getinfo", db_getinfo}, + {"getlocal", db_getlocal}, + {"getregistry", db_getregistry}, + {"getmetatable", db_getmetatable}, + {"getupvalue", db_getupvalue}, + {"setfenv", db_setfenv}, + {"sethook", db_sethook}, + {"setlocal", db_setlocal}, + {"setmetatable", db_setmetatable}, + {"setupvalue", db_setupvalue}, + {"traceback", db_errorfb}, + {NULL, NULL} +}; + + +LUALIB_API int luaopen_debug (lua_State *L) { + luaL_register(L, LUA_DBLIBNAME, dblib); + return 1; +} + diff --git a/deps/lua/src/ldebug.c b/deps/lua/src/ldebug.c new file mode 100644 index 0000000000000000000000000000000000000000..8919a017393e043ddb786449d912be4b52e7f564 --- /dev/null +++ b/deps/lua/src/ldebug.c @@ -0,0 +1,620 @@ +/* +** $Id: ldebug.c,v 2.29 2005/12/22 16:19:56 roberto Exp $ +** Debug Interface +** See Copyright Notice in lua.h +*/ + + +#include <stdarg.h> +#include <stddef.h> +#include <string.h> + + +#define ldebug_c +#define LUA_CORE + +#include "lua.h" + +#include "lapi.h" +#include "lcode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lvm.h" + + + +static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name); + + +static int currentpc (lua_State *L, CallInfo *ci) { + if (!isLua(ci)) return -1; /* function is not a Lua function? */ + if (ci == L->ci) + ci->savedpc = L->savedpc; + return pcRel(ci->savedpc, ci_func(ci)->l.p); +} + + +static int currentline (lua_State *L, CallInfo *ci) { + int pc = currentpc(L, ci); + if (pc < 0) + return -1; /* only active lua functions have current-line information */ + else + return getline(ci_func(ci)->l.p, pc); +} + + +/* +** this function can be called asynchronously (e.g. during a signal) +*/ +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count) { + if (func == NULL || mask == 0) { /* turn off hooks? */ + mask = 0; + func = NULL; + } + L->hook = func; + L->basehookcount = count; + resethookcount(L); + L->hookmask = cast_byte(mask); + return 1; +} + + +LUA_API lua_Hook lua_gethook (lua_State *L) { + return L->hook; +} + + +LUA_API int lua_gethookmask (lua_State *L) { + return L->hookmask; +} + + +LUA_API int lua_gethookcount (lua_State *L) { + return L->basehookcount; +} + + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) { + int status; + CallInfo *ci; + lua_lock(L); + for (ci = L->ci; level > 0 && ci > L->base_ci; ci--) { + level--; + if (f_isLua(ci)) /* Lua function? */ + level -= ci->tailcalls; /* skip lost tail calls */ + } + if (level == 0 && ci > L->base_ci) { /* level found? */ + status = 1; + ar->i_ci = cast_int(ci - L->base_ci); + } + else if (level < 0) { /* level is of a lost tail call? */ + status = 1; + ar->i_ci = 0; + } + else status = 0; /* no such level */ + lua_unlock(L); + return status; +} + + +static Proto *getluaproto (CallInfo *ci) { + return (isLua(ci) ? 
ci_func(ci)->l.p : NULL); +} + + +static const char *findlocal (lua_State *L, CallInfo *ci, int n) { + const char *name; + Proto *fp = getluaproto(ci); + if (fp && (name = luaF_getlocalname(fp, n, currentpc(L, ci))) != NULL) + return name; /* is a local variable in a Lua function */ + else { + StkId limit = (ci == L->ci) ? L->top : (ci+1)->func; + if (limit - ci->base >= n && n > 0) /* is 'n' inside 'ci' stack? */ + return "(*temporary)"; + else + return NULL; + } +} + + +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) { + CallInfo *ci = L->base_ci + ar->i_ci; + const char *name = findlocal(L, ci, n); + lua_lock(L); + if (name) + luaA_pushobject(L, ci->base + (n - 1)); + lua_unlock(L); + return name; +} + + +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) { + CallInfo *ci = L->base_ci + ar->i_ci; + const char *name = findlocal(L, ci, n); + lua_lock(L); + if (name) + setobjs2s(L, ci->base + (n - 1), L->top - 1); + L->top--; /* pop value */ + lua_unlock(L); + return name; +} + + +static void funcinfo (lua_Debug *ar, Closure *cl) { + if (cl->c.isC) { + ar->source = "=[C]"; + ar->linedefined = -1; + ar->lastlinedefined = -1; + ar->what = "C"; + } + else { + ar->source = getstr(cl->l.p->source); + ar->linedefined = cl->l.p->linedefined; + ar->lastlinedefined = cl->l.p->lastlinedefined; + ar->what = (ar->linedefined == 0) ? "main" : "Lua"; + } + luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE); +} + + +static void info_tailcall (lua_Debug *ar) { + ar->name = ar->namewhat = ""; + ar->what = "tail"; + ar->lastlinedefined = ar->linedefined = ar->currentline = -1; + ar->source = "=(tail call)"; + luaO_chunkid(ar->short_src, ar->source, LUA_IDSIZE); + ar->nups = 0; +} + + +static void collectvalidlines (lua_State *L, Closure *f) { + if (f == NULL || f->c.isC) { + setnilvalue(L->top); + } + else { + Table *t = luaH_new(L, 0, 0); + int *lineinfo = f->l.p->lineinfo; + int i; + for (i=0; i<f->l.p->sizelineinfo; i++) + setbvalue(luaH_setnum(L, t, lineinfo[i]), 1); + sethvalue(L, L->top, t); + } + incr_top(L); +} + + +static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar, + Closure *f, CallInfo *ci) { + int status = 1; + if (f == NULL) { + info_tailcall(ar); + return status; + } + for (; *what; what++) { + switch (*what) { + case 'S': { + funcinfo(ar, f); + break; + } + case 'l': { + ar->currentline = (ci) ? currentline(L, ci) : -1; + break; + } + case 'u': { + ar->nups = f->c.nupvalues; + break; + } + case 'n': { + ar->namewhat = (ci) ? getfuncname(L, ci, &ar->name) : NULL; + if (ar->namewhat == NULL) { + ar->namewhat = ""; /* not found */ + ar->name = NULL; + } + break; + } + case 'L': + case 'f': /* handled by lua_getinfo */ + break; + default: status = 0; /* invalid option */ + } + } + return status; +} + + +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) { + int status; + Closure *f = NULL; + CallInfo *ci = NULL; + lua_lock(L); + if (*what == '>') { + StkId func = L->top - 1; + luai_apicheck(L, ttisfunction(func)); + what++; /* skip the '>' */ + f = clvalue(func); + L->top--; /* pop function */ + } + else if (ar->i_ci != 0) { /* no tail call? 
*/ + ci = L->base_ci + ar->i_ci; + lua_assert(ttisfunction(ci->func)); + f = clvalue(ci->func); + } + status = auxgetinfo(L, what, ar, f, ci); + if (strchr(what, 'f')) { + if (f == NULL) setnilvalue(L->top); + else setclvalue(L, L->top, f); + incr_top(L); + } + if (strchr(what, 'L')) + collectvalidlines(L, f); + lua_unlock(L); + return status; +} + + +/* +** {====================================================== +** Symbolic Execution and code checker +** ======================================================= +*/ + +#define check(x) if (!(x)) return 0; + +#define checkjump(pt,pc) check(0 <= pc && pc < pt->sizecode) + +#define checkreg(pt,reg) check((reg) < (pt)->maxstacksize) + + + +static int precheck (const Proto *pt) { + check(pt->maxstacksize <= MAXSTACK); + lua_assert(pt->numparams+(pt->is_vararg & VARARG_HASARG) <= pt->maxstacksize); + lua_assert(!(pt->is_vararg & VARARG_NEEDSARG) || + (pt->is_vararg & VARARG_HASARG)); + check(pt->sizeupvalues <= pt->nups); + check(pt->sizelineinfo == pt->sizecode || pt->sizelineinfo == 0); + check(GET_OPCODE(pt->code[pt->sizecode-1]) == OP_RETURN); + return 1; +} + + +#define checkopenop(pt,pc) luaG_checkopenop((pt)->code[(pc)+1]) + +int luaG_checkopenop (Instruction i) { + switch (GET_OPCODE(i)) { + case OP_CALL: + case OP_TAILCALL: + case OP_RETURN: + case OP_SETLIST: { + check(GETARG_B(i) == 0); + return 1; + } + default: return 0; /* invalid instruction after an open call */ + } +} + + +static int checkArgMode (const Proto *pt, int r, enum OpArgMask mode) { + switch (mode) { + case OpArgN: check(r == 0); break; + case OpArgU: break; + case OpArgR: checkreg(pt, r); break; + case OpArgK: + check(ISK(r) ? INDEXK(r) < pt->sizek : r < pt->maxstacksize); + break; + } + return 1; +} + + +static Instruction symbexec (const Proto *pt, int lastpc, int reg) { + int pc; + int last; /* stores position of last instruction that changed `reg' */ + last = pt->sizecode-1; /* points to final return (a `neutral' instruction) */ + check(precheck(pt)); + for (pc = 0; pc < lastpc; pc++) { + Instruction i = pt->code[pc]; + OpCode op = GET_OPCODE(i); + int a = GETARG_A(i); + int b = 0; + int c = 0; + check(op < NUM_OPCODES); + checkreg(pt, a); + switch (getOpMode(op)) { + case iABC: { + b = GETARG_B(i); + c = GETARG_C(i); + check(checkArgMode(pt, b, getBMode(op))); + check(checkArgMode(pt, c, getCMode(op))); + break; + } + case iABx: { + b = GETARG_Bx(i); + if (getBMode(op) == OpArgK) check(b < pt->sizek); + break; + } + case iAsBx: { + b = GETARG_sBx(i); + if (getBMode(op) == OpArgR) { + int dest = pc+1+b; + check(0 <= dest && dest < pt->sizecode); + if (dest > 0) { + /* cannot jump to a setlist count */ + Instruction d = pt->code[dest-1]; + check(!(GET_OPCODE(d) == OP_SETLIST && GETARG_C(d) == 0)); + } + } + break; + } + } + if (testAMode(op)) { + if (a == reg) last = pc; /* change register `a' */ + } + if (testTMode(op)) { + check(pc+2 < pt->sizecode); /* check skip */ + check(GET_OPCODE(pt->code[pc+1]) == OP_JMP); + } + switch (op) { + case OP_LOADBOOL: { + check(c == 0 || pc+2 < pt->sizecode); /* check its jump */ + break; + } + case OP_LOADNIL: { + if (a <= reg && reg <= b) + last = pc; /* set registers from `a' to `b' */ + break; + } + case OP_GETUPVAL: + case OP_SETUPVAL: { + check(b < pt->nups); + break; + } + case OP_GETGLOBAL: + case OP_SETGLOBAL: { + check(ttisstring(&pt->k[b])); + break; + } + case OP_SELF: { + checkreg(pt, a+1); + if (reg == a+1) last = pc; + break; + } + case OP_CONCAT: { + check(b < c); /* at least two operands */ + break; + } + case 
OP_TFORLOOP: { + check(c >= 1); /* at least one result (control variable) */ + checkreg(pt, a+2+c); /* space for results */ + if (reg >= a+2) last = pc; /* affect all regs above its base */ + break; + } + case OP_FORLOOP: + case OP_FORPREP: + checkreg(pt, a+3); + /* go through */ + case OP_JMP: { + int dest = pc+1+b; + /* not full check and jump is forward and do not skip `lastpc'? */ + if (reg != NO_REG && pc < dest && dest <= lastpc) + pc += b; /* do the jump */ + break; + } + case OP_CALL: + case OP_TAILCALL: { + if (b != 0) { + checkreg(pt, a+b-1); + } + c--; /* c = num. returns */ + if (c == LUA_MULTRET) { + check(checkopenop(pt, pc)); + } + else if (c != 0) + checkreg(pt, a+c-1); + if (reg >= a) last = pc; /* affect all registers above base */ + break; + } + case OP_RETURN: { + b--; /* b = num. returns */ + if (b > 0) checkreg(pt, a+b-1); + break; + } + case OP_SETLIST: { + if (b > 0) checkreg(pt, a + b); + if (c == 0) pc++; + break; + } + case OP_CLOSURE: { + int nup; + check(b < pt->sizep); + nup = pt->p[b]->nups; + check(pc + nup < pt->sizecode); + for (; nup>0; nup--) { + OpCode op1 = GET_OPCODE(pt->code[pc+nup]); + check(op1 == OP_GETUPVAL || op1 == OP_MOVE); + } + break; + } + case OP_VARARG: { + check((pt->is_vararg & VARARG_ISVARARG) && + !(pt->is_vararg & VARARG_NEEDSARG)); + b--; + if (b == LUA_MULTRET) check(checkopenop(pt, pc)); + checkreg(pt, a+b-1); + break; + } + default: break; + } + } + return pt->code[last]; +} + +#undef check +#undef checkjump +#undef checkreg + +/* }====================================================== */ + + +int luaG_checkcode (const Proto *pt) { + return (symbexec(pt, pt->sizecode, NO_REG) != 0); +} + + +static const char *kname (Proto *p, int c) { + if (ISK(c) && ttisstring(&p->k[INDEXK(c)])) + return svalue(&p->k[INDEXK(c)]); + else + return "?"; +} + + +static const char *getobjname (lua_State *L, CallInfo *ci, int stackpos, + const char **name) { + if (isLua(ci)) { /* a Lua function? */ + Proto *p = ci_func(ci)->l.p; + int pc = currentpc(L, ci); + Instruction i; + *name = luaF_getlocalname(p, stackpos+1, pc); + if (*name) /* is a local? */ + return "local"; + i = symbexec(p, pc, stackpos); /* try symbolic execution */ + lua_assert(pc != -1); + switch (GET_OPCODE(i)) { + case OP_GETGLOBAL: { + int g = GETARG_Bx(i); /* global index */ + lua_assert(ttisstring(&p->k[g])); + *name = svalue(&p->k[g]); + return "global"; + } + case OP_MOVE: { + int a = GETARG_A(i); + int b = GETARG_B(i); /* move from `b' to `a' */ + if (b < a) + return getobjname(L, ci, b, name); /* get name for `b' */ + break; + } + case OP_GETTABLE: { + int k = GETARG_C(i); /* key index */ + *name = kname(p, k); + return "field"; + } + case OP_GETUPVAL: { + int u = GETARG_B(i); /* upvalue index */ + *name = p->upvalues ? 
getstr(p->upvalues[u]) : "?"; + return "upvalue"; + } + case OP_SELF: { + int k = GETARG_C(i); /* key index */ + *name = kname(p, k); + return "method"; + } + default: break; + } + } + return NULL; /* no useful name found */ +} + + +static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) { + Instruction i; + if ((isLua(ci) && ci->tailcalls > 0) || !isLua(ci - 1)) + return NULL; /* calling function is not Lua (or is unknown) */ + ci--; /* calling function */ + i = ci_func(ci)->l.p->code[currentpc(L, ci)]; + if (GET_OPCODE(i) == OP_CALL || GET_OPCODE(i) == OP_TAILCALL || + GET_OPCODE(i) == OP_TFORLOOP) + return getobjname(L, ci, GETARG_A(i), name); + else + return NULL; /* no useful name can be found */ +} + + +/* only ANSI way to check whether a pointer points to an array */ +static int isinstack (CallInfo *ci, const TValue *o) { + StkId p; + for (p = ci->base; p < ci->top; p++) + if (o == p) return 1; + return 0; +} + + +void luaG_typeerror (lua_State *L, const TValue *o, const char *op) { + const char *name = NULL; + const char *t = luaT_typenames[ttype(o)]; + const char *kind = (isinstack(L->ci, o)) ? + getobjname(L, L->ci, cast_int(o - L->base), &name) : + NULL; + if (kind) + luaG_runerror(L, "attempt to %s %s " LUA_QS " (a %s value)", + op, kind, name, t); + else + luaG_runerror(L, "attempt to %s a %s value", op, t); +} + + +void luaG_concaterror (lua_State *L, StkId p1, StkId p2) { + if (ttisstring(p1)) p1 = p2; + lua_assert(!ttisstring(p1)); + luaG_typeerror(L, p1, "concatenate"); +} + + +void luaG_aritherror (lua_State *L, const TValue *p1, const TValue *p2) { + TValue temp; + if (luaV_tonumber(p1, &temp) == NULL) + p2 = p1; /* first operand is wrong */ + luaG_typeerror(L, p2, "perform arithmetic on"); +} + + +int luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) { + const char *t1 = luaT_typenames[ttype(p1)]; + const char *t2 = luaT_typenames[ttype(p2)]; + if (t1[2] == t2[2]) + luaG_runerror(L, "attempt to compare two %s values", t1); + else + luaG_runerror(L, "attempt to compare %s with %s", t1, t2); + return 0; +} + + +static void addinfo (lua_State *L, const char *msg) { + CallInfo *ci = L->ci; + if (isLua(ci)) { /* is Lua code? */ + char buff[LUA_IDSIZE]; /* add file:line information */ + int line = currentline(L, ci); + luaO_chunkid(buff, getstr(getluaproto(ci)->source), LUA_IDSIZE); + luaO_pushfstring(L, "%s:%d: %s", buff, line, msg); + } +} + + +void luaG_errormsg (lua_State *L) { + if (L->errfunc != 0) { /* is there an error handling function? */ + StkId errfunc = restorestack(L, L->errfunc); + if (!ttisfunction(errfunc)) luaD_throw(L, LUA_ERRERR); + setobjs2s(L, L->top, L->top - 1); /* move argument */ + setobjs2s(L, L->top - 1, errfunc); /* push function */ + incr_top(L); + luaD_call(L, L->top - 2, 1); /* call it */ + } + luaD_throw(L, LUA_ERRRUN); +} + + +void luaG_runerror (lua_State *L, const char *fmt, ...) 
{ + va_list argp; + va_start(argp, fmt); + addinfo(L, luaO_pushvfstring(L, fmt, argp)); + va_end(argp); + luaG_errormsg(L); +} + diff --git a/deps/lua/src/ldebug.h b/deps/lua/src/ldebug.h new file mode 100644 index 0000000000000000000000000000000000000000..9c76aa10f0a2bdb26f1c89e05609ba9556a8114e --- /dev/null +++ b/deps/lua/src/ldebug.h @@ -0,0 +1,33 @@ +/* +** $Id: ldebug.h,v 2.3 2005/04/25 19:24:10 roberto Exp $ +** Auxiliary functions from Debug Interface module +** See Copyright Notice in lua.h +*/ + +#ifndef ldebug_h +#define ldebug_h + + +#include "lstate.h" + + +#define pcRel(pc, p) (cast(int, (pc) - (p)->code) - 1) + +#define getline(f,pc) (((f)->lineinfo) ? (f)->lineinfo[pc] : 0) + +#define resethookcount(L) (L->hookcount = L->basehookcount) + + +LUAI_FUNC void luaG_typeerror (lua_State *L, const TValue *o, + const char *opname); +LUAI_FUNC void luaG_concaterror (lua_State *L, StkId p1, StkId p2); +LUAI_FUNC void luaG_aritherror (lua_State *L, const TValue *p1, + const TValue *p2); +LUAI_FUNC int luaG_ordererror (lua_State *L, const TValue *p1, + const TValue *p2); +LUAI_FUNC void luaG_runerror (lua_State *L, const char *fmt, ...); +LUAI_FUNC void luaG_errormsg (lua_State *L); +LUAI_FUNC int luaG_checkcode (const Proto *pt); +LUAI_FUNC int luaG_checkopenop (Instruction i); + +#endif diff --git a/deps/lua/src/ldo.c b/deps/lua/src/ldo.c new file mode 100644 index 0000000000000000000000000000000000000000..b8eb1a8a595c4ba85f8db3d3c2f3f9dad1c6692c --- /dev/null +++ b/deps/lua/src/ldo.c @@ -0,0 +1,515 @@ +/* +** $Id: ldo.c,v 2.37 2005/12/22 16:19:56 roberto Exp $ +** Stack and Call structure of Lua +** See Copyright Notice in lua.h +*/ + + +#include <setjmp.h> +#include <stdlib.h> +#include <string.h> + +#define ldo_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lundump.h" +#include "lvm.h" +#include "lzio.h" + + + + +/* +** {====================================================== +** Error-recovery functions +** ======================================================= +*/ + + +/* chain list of long jump buffers */ +struct lua_longjmp { + struct lua_longjmp *previous; + luai_jmpbuf b; + volatile int status; /* error code */ +}; + + +void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) { + switch (errcode) { + case LUA_ERRMEM: { + setsvalue2s(L, oldtop, luaS_newliteral(L, MEMERRMSG)); + break; + } + case LUA_ERRERR: { + setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling")); + break; + } + case LUA_ERRSYNTAX: + case LUA_ERRRUN: { + setobjs2s(L, oldtop, L->top - 1); /* error message on current top */ + break; + } + } + L->top = oldtop + 1; +} + + +static void restore_stack_limit (lua_State *L) { + lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK - 1); + if (L->size_ci > LUAI_MAXCALLS) { /* there was an overflow? */ + int inuse = cast_int(L->ci - L->base_ci); + if (inuse + 1 < LUAI_MAXCALLS) /* can `undo' overflow? 
*/ + luaD_reallocCI(L, LUAI_MAXCALLS); + } +} + + +static void resetstack (lua_State *L, int status) { + L->ci = L->base_ci; + L->base = L->ci->base; + luaF_close(L, L->base); /* close eventual pending closures */ + luaD_seterrorobj(L, status, L->base); + L->nCcalls = 0; + L->allowhook = 1; + restore_stack_limit(L); + L->errfunc = 0; + L->errorJmp = NULL; +} + + +void luaD_throw (lua_State *L, int errcode) { + if (L->errorJmp) { + L->errorJmp->status = errcode; + LUAI_THROW(L, L->errorJmp); + } + else { + L->status = cast_byte(errcode); + if (G(L)->panic) { + resetstack(L, errcode); + lua_unlock(L); + G(L)->panic(L); + } + exit(EXIT_FAILURE); + } +} + + +int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) { + struct lua_longjmp lj; + lj.status = 0; + lj.previous = L->errorJmp; /* chain new error handler */ + L->errorJmp = &lj; + LUAI_TRY(L, &lj, + (*f)(L, ud); + ); + L->errorJmp = lj.previous; /* restore old error handler */ + return lj.status; +} + +/* }====================================================== */ + + +static void correctstack (lua_State *L, TValue *oldstack) { + CallInfo *ci; + GCObject *up; + L->top = (L->top - oldstack) + L->stack; + for (up = L->openupval; up != NULL; up = up->gch.next) + gco2uv(up)->v = (gco2uv(up)->v - oldstack) + L->stack; + for (ci = L->base_ci; ci <= L->ci; ci++) { + ci->top = (ci->top - oldstack) + L->stack; + ci->base = (ci->base - oldstack) + L->stack; + ci->func = (ci->func - oldstack) + L->stack; + } + L->base = (L->base - oldstack) + L->stack; +} + + +void luaD_reallocstack (lua_State *L, int newsize) { + TValue *oldstack = L->stack; + int realsize = newsize + 1 + EXTRA_STACK; + lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK - 1); + luaM_reallocvector(L, L->stack, L->stacksize, realsize, TValue); + L->stacksize = realsize; + L->stack_last = L->stack+newsize; + correctstack(L, oldstack); +} + + +void luaD_reallocCI (lua_State *L, int newsize) { + CallInfo *oldci = L->base_ci; + luaM_reallocvector(L, L->base_ci, L->size_ci, newsize, CallInfo); + L->size_ci = newsize; + L->ci = (L->ci - oldci) + L->base_ci; + L->end_ci = L->base_ci + L->size_ci - 1; +} + + +void luaD_growstack (lua_State *L, int n) { + if (n <= L->stacksize) /* double size is enough? */ + luaD_reallocstack(L, 2*L->stacksize); + else + luaD_reallocstack(L, L->stacksize + n); +} + + +static CallInfo *growCI (lua_State *L) { + if (L->size_ci > LUAI_MAXCALLS) /* overflow while handling overflow? 
*/
+    luaD_throw(L, LUA_ERRERR);
+  else {
+    luaD_reallocCI(L, 2*L->size_ci);
+    if (L->size_ci > LUAI_MAXCALLS)
+      luaG_runerror(L, "stack overflow");
+  }
+  return ++L->ci;
+}
+
+
+void luaD_callhook (lua_State *L, int event, int line) {
+  lua_Hook hook = L->hook;
+  if (hook && L->allowhook) {
+    ptrdiff_t top = savestack(L, L->top);
+    ptrdiff_t ci_top = savestack(L, L->ci->top);
+    lua_Debug ar;
+    ar.event = event;
+    ar.currentline = line;
+    if (event == LUA_HOOKTAILRET)
+      ar.i_ci = 0;  /* tail call; no debug information about it */
+    else
+      ar.i_ci = cast_int(L->ci - L->base_ci);
+    luaD_checkstack(L, LUA_MINSTACK);  /* ensure minimum stack size */
+    L->ci->top = L->top + LUA_MINSTACK;
+    lua_assert(L->ci->top <= L->stack_last);
+    L->allowhook = 0;  /* cannot call hooks inside a hook */
+    lua_unlock(L);
+    (*hook)(L, &ar);
+    lua_lock(L);
+    lua_assert(!L->allowhook);
+    L->allowhook = 1;
+    L->ci->top = restorestack(L, ci_top);
+    L->top = restorestack(L, top);
+  }
+}
+
+
+static StkId adjust_varargs (lua_State *L, Proto *p, int actual) {
+  int i;
+  int nfixargs = p->numparams;
+  Table *htab = NULL;
+  StkId base, fixed;
+  for (; actual < nfixargs; ++actual)
+    setnilvalue(L->top++);
+#if defined(LUA_COMPAT_VARARG)
+  if (p->is_vararg & VARARG_NEEDSARG) { /* compat. with old-style vararg? */
+    int nvar = actual - nfixargs;  /* number of extra arguments */
+    lua_assert(p->is_vararg & VARARG_HASARG);
+    luaC_checkGC(L);
+    htab = luaH_new(L, nvar, 1);  /* create `arg' table */
+    for (i=0; i<nvar; i++)  /* put extra arguments into `arg' table */
+      setobj2n(L, luaH_setnum(L, htab, i+1), L->top - nvar + i);
+    /* store counter in field `n' */
+    setnvalue(luaH_setstr(L, htab, luaS_newliteral(L, "n")), cast_num(nvar));
+  }
+#endif
+  /* move fixed parameters to final position */
+  fixed = L->top - actual;  /* first fixed argument */
+  base = L->top;  /* final position of first argument */
+  for (i=0; i<nfixargs; i++) {
+    setobjs2s(L, L->top++, fixed+i);
+    setnilvalue(fixed+i);
+  }
+  /* add `arg' parameter */
+  if (htab) {
+    sethvalue(L, L->top++, htab);
+    lua_assert(iswhite(obj2gco(htab)));
+  }
+  return base;
+}
+
+
+static StkId tryfuncTM (lua_State *L, StkId func) {
+  const TValue *tm = luaT_gettmbyobj(L, func, TM_CALL);
+  StkId p;
+  ptrdiff_t funcr = savestack(L, func);
+  if (!ttisfunction(tm))
+    luaG_typeerror(L, func, "call");
+  /* Open a hole inside the stack at `func' */
+  for (p = L->top; p > func; p--) setobjs2s(L, p, p-1);
+  incr_top(L);
+  func = restorestack(L, funcr);  /* previous call may change stack */
+  setobj2s(L, func, tm);  /* tag method is the new function to be called */
+  return func;
+}
+
+
+
+#define inc_ci(L) \
+  ((L->ci == L->end_ci) ? growCI(L) : \
+   (condhardstacktests(luaD_reallocCI(L, L->size_ci)), ++L->ci))
+
+
+int luaD_precall (lua_State *L, StkId func, int nresults) {
+  LClosure *cl;
+  ptrdiff_t funcr;
+  if (!ttisfunction(func)) /* `func' is not a function? */
+    func = tryfuncTM(L, func);  /* check the `function' tag method */
+  funcr = savestack(L, func);
+  cl = &clvalue(func)->l;
+  L->ci->savedpc = L->savedpc;
+  if (!cl->isC) {  /* Lua function? prepare its call */
+    CallInfo *ci;
+    StkId st, base;
+    Proto *p = cl->p;
+    luaD_checkstack(L, p->maxstacksize);
+    func = restorestack(L, funcr);
+    if (!p->is_vararg) {  /* no varargs? 
*/ + base = func + 1; + if (L->top > base + p->numparams) + L->top = base + p->numparams; + } + else { /* vararg function */ + int nargs = cast_int(L->top - func) - 1; + base = adjust_varargs(L, p, nargs); + func = restorestack(L, funcr); /* previous call may change the stack */ + } + ci = inc_ci(L); /* now `enter' new function */ + ci->func = func; + L->base = ci->base = base; + ci->top = L->base + p->maxstacksize; + lua_assert(ci->top <= L->stack_last); + L->savedpc = p->code; /* starting point */ + ci->tailcalls = 0; + ci->nresults = nresults; + for (st = L->top; st < ci->top; st++) + setnilvalue(st); + L->top = ci->top; + if (L->hookmask & LUA_MASKCALL) { + L->savedpc++; /* hooks assume 'pc' is already incremented */ + luaD_callhook(L, LUA_HOOKCALL, -1); + L->savedpc--; /* correct 'pc' */ + } + return PCRLUA; + } + else { /* if is a C function, call it */ + CallInfo *ci; + int n; + luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ + ci = inc_ci(L); /* now `enter' new function */ + ci->func = restorestack(L, funcr); + L->base = ci->base = ci->func + 1; + ci->top = L->top + LUA_MINSTACK; + lua_assert(ci->top <= L->stack_last); + ci->nresults = nresults; + if (L->hookmask & LUA_MASKCALL) + luaD_callhook(L, LUA_HOOKCALL, -1); + lua_unlock(L); + n = (*curr_func(L)->c.f)(L); /* do the actual call */ + lua_lock(L); + if (n < 0) /* yielding? */ + return PCRYIELD; + else { + luaD_poscall(L, L->top - n); + return PCRC; + } + } +} + + +static StkId callrethooks (lua_State *L, StkId firstResult) { + ptrdiff_t fr = savestack(L, firstResult); /* next call may change stack */ + luaD_callhook(L, LUA_HOOKRET, -1); + if (f_isLua(L->ci)) { /* Lua function? */ + while (L->ci->tailcalls--) /* call hook for eventual tail calls */ + luaD_callhook(L, LUA_HOOKTAILRET, -1); + } + return restorestack(L, fr); +} + + +int luaD_poscall (lua_State *L, StkId firstResult) { + StkId res; + int wanted, i; + CallInfo *ci; + if (L->hookmask & LUA_MASKRET) + firstResult = callrethooks(L, firstResult); + ci = L->ci--; + res = ci->func; /* res == final position of 1st result */ + wanted = ci->nresults; + L->base = (ci - 1)->base; /* restore base */ + L->savedpc = (ci - 1)->savedpc; /* restore savedpc */ + /* move results to correct place */ + for (i = wanted; i != 0 && firstResult < L->top; i--) + setobjs2s(L, res++, firstResult++); + while (i-- > 0) + setnilvalue(res++); + L->top = res; + return (wanted - LUA_MULTRET); /* 0 iff wanted == LUA_MULTRET */ +} + + +/* +** Call a function (C or Lua). The function to be called is at *func. +** The arguments are on the stack, right after the function. +** When returns, all the results are on the stack, starting at the original +** function position. +*/ +void luaD_call (lua_State *L, StkId func, int nResults) { + if (++L->nCcalls >= LUAI_MAXCCALLS) { + if (L->nCcalls == LUAI_MAXCCALLS) + luaG_runerror(L, "C stack overflow"); + else if (L->nCcalls >= (LUAI_MAXCCALLS + (LUAI_MAXCCALLS>>3))) + luaD_throw(L, LUA_ERRERR); /* error while handing stack error */ + } + if (luaD_precall(L, func, nResults) == PCRLUA) /* is a Lua function? 
*/ + luaV_execute(L, 1); /* call it */ + L->nCcalls--; + luaC_checkGC(L); +} + + +static void resume (lua_State *L, void *ud) { + StkId firstArg = cast(StkId, ud); + CallInfo *ci = L->ci; + if (L->status != LUA_YIELD) { /* start coroutine */ + lua_assert(ci == L->base_ci && firstArg > L->base); + if (luaD_precall(L, firstArg - 1, LUA_MULTRET) != PCRLUA) + return; + } + else { /* resuming from previous yield */ + if (!f_isLua(ci)) { /* `common' yield? */ + /* finish interrupted execution of `OP_CALL' */ + lua_assert(GET_OPCODE(*((ci-1)->savedpc - 1)) == OP_CALL || + GET_OPCODE(*((ci-1)->savedpc - 1)) == OP_TAILCALL); + if (luaD_poscall(L, firstArg)) /* complete it... */ + L->top = L->ci->top; /* and correct top if not multiple results */ + } + else /* yielded inside a hook: just continue its execution */ + L->base = L->ci->base; + } + L->status = 0; + luaV_execute(L, cast_int(L->ci - L->base_ci)); +} + + +static int resume_error (lua_State *L, const char *msg) { + L->top = L->ci->base; + setsvalue2s(L, L->top, luaS_new(L, msg)); + incr_top(L); + lua_unlock(L); + return LUA_ERRRUN; +} + + +LUA_API int lua_resume (lua_State *L, int nargs) { + int status; + lua_lock(L); + if (L->status != LUA_YIELD) { + if (L->status != 0) + return resume_error(L, "cannot resume dead coroutine"); + else if (L->ci != L->base_ci) + return resume_error(L, "cannot resume non-suspended coroutine"); + } + luai_userstateresume(L, nargs); + lua_assert(L->errfunc == 0 && L->nCcalls == 0); + status = luaD_rawrunprotected(L, resume, L->top - nargs); + if (status != 0) { /* error? */ + L->status = cast_byte(status); /* mark thread as `dead' */ + luaD_seterrorobj(L, status, L->top); + L->ci->top = L->top; + } + else + status = L->status; + lua_unlock(L); + return status; +} + + +LUA_API int lua_yield (lua_State *L, int nresults) { + luai_userstateyield(L, nresults); + lua_lock(L); + if (L->nCcalls > 0) + luaG_runerror(L, "attempt to yield across metamethod/C-call boundary"); + L->base = L->top - nresults; /* protect stack slots below */ + L->status = LUA_YIELD; + lua_unlock(L); + return -1; +} + + +int luaD_pcall (lua_State *L, Pfunc func, void *u, + ptrdiff_t old_top, ptrdiff_t ef) { + int status; + unsigned short oldnCcalls = L->nCcalls; + ptrdiff_t old_ci = saveci(L, L->ci); + lu_byte old_allowhooks = L->allowhook; + ptrdiff_t old_errfunc = L->errfunc; + L->errfunc = ef; + status = luaD_rawrunprotected(L, func, u); + if (status != 0) { /* an error occurred? */ + StkId oldtop = restorestack(L, old_top); + luaF_close(L, oldtop); /* close eventual pending closures */ + luaD_seterrorobj(L, status, oldtop); + L->nCcalls = oldnCcalls; + L->ci = restoreci(L, old_ci); + L->base = L->ci->base; + L->savedpc = L->ci->savedpc; + L->allowhook = old_allowhooks; + restore_stack_limit(L); + } + L->errfunc = old_errfunc; + return status; +} + + + +/* +** Execute a protected parser. +*/ +struct SParser { /* data to `f_parser' */ + ZIO *z; + Mbuffer buff; /* buffer to be used by the scanner */ + const char *name; +}; + +static void f_parser (lua_State *L, void *ud) { + int i; + Proto *tf; + Closure *cl; + struct SParser *p = cast(struct SParser *, ud); + int c = luaZ_lookahead(p->z); + luaC_checkGC(L); + tf = ((c == LUA_SIGNATURE[0]) ? 
luaU_undump : luaY_parser)(L, p->z, + &p->buff, p->name); + cl = luaF_newLclosure(L, tf->nups, hvalue(gt(L))); + cl->l.p = tf; + for (i = 0; i < tf->nups; i++) /* initialize eventual upvalues */ + cl->l.upvals[i] = luaF_newupval(L); + setclvalue(L, L->top, cl); + incr_top(L); +} + + +int luaD_protectedparser (lua_State *L, ZIO *z, const char *name) { + struct SParser p; + int status; + p.z = z; p.name = name; + luaZ_initbuffer(L, &p.buff); + status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc); + luaZ_freebuffer(L, &p.buff); + return status; +} + + diff --git a/deps/lua/src/ldo.h b/deps/lua/src/ldo.h new file mode 100644 index 0000000000000000000000000000000000000000..b2de92bb807f4e384d0647bcc56cf1a221153b7e --- /dev/null +++ b/deps/lua/src/ldo.h @@ -0,0 +1,57 @@ +/* +** $Id: ldo.h,v 2.7 2005/08/24 16:15:49 roberto Exp $ +** Stack and Call structure of Lua +** See Copyright Notice in lua.h +*/ + +#ifndef ldo_h +#define ldo_h + + +#include "lobject.h" +#include "lstate.h" +#include "lzio.h" + + +#define luaD_checkstack(L,n) \ + if ((char *)L->stack_last - (char *)L->top <= (n)*(int)sizeof(TValue)) \ + luaD_growstack(L, n); \ + else condhardstacktests(luaD_reallocstack(L, L->stacksize - EXTRA_STACK - 1)); + + +#define incr_top(L) {luaD_checkstack(L,1); L->top++;} + +#define savestack(L,p) ((char *)(p) - (char *)L->stack) +#define restorestack(L,n) ((TValue *)((char *)L->stack + (n))) + +#define saveci(L,p) ((char *)(p) - (char *)L->base_ci) +#define restoreci(L,n) ((CallInfo *)((char *)L->base_ci + (n))) + + +/* results from luaD_precall */ +#define PCRLUA 0 /* initiated a call to a Lua function */ +#define PCRC 1 /* did a call to a C function */ +#define PCRYIELD 2 /* C funtion yielded */ + + +/* type of protected functions, to be ran by `runprotected' */ +typedef void (*Pfunc) (lua_State *L, void *ud); + +LUAI_FUNC int luaD_protectedparser (lua_State *L, ZIO *z, const char *name); +LUAI_FUNC void luaD_callhook (lua_State *L, int event, int line); +LUAI_FUNC int luaD_precall (lua_State *L, StkId func, int nresults); +LUAI_FUNC void luaD_call (lua_State *L, StkId func, int nResults); +LUAI_FUNC int luaD_pcall (lua_State *L, Pfunc func, void *u, + ptrdiff_t oldtop, ptrdiff_t ef); +LUAI_FUNC int luaD_poscall (lua_State *L, StkId firstResult); +LUAI_FUNC void luaD_reallocCI (lua_State *L, int newsize); +LUAI_FUNC void luaD_reallocstack (lua_State *L, int newsize); +LUAI_FUNC void luaD_growstack (lua_State *L, int n); + +LUAI_FUNC void luaD_throw (lua_State *L, int errcode); +LUAI_FUNC int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud); + +LUAI_FUNC void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop); + +#endif + diff --git a/deps/lua/src/ldump.c b/deps/lua/src/ldump.c new file mode 100644 index 0000000000000000000000000000000000000000..f08277d3ac440a48a3f771113729f39f492e57cc --- /dev/null +++ b/deps/lua/src/ldump.c @@ -0,0 +1,164 @@ +/* +** $Id: ldump.c,v 1.15 2006/02/16 15:53:49 lhf Exp $ +** save precompiled Lua chunks +** See Copyright Notice in lua.h +*/ + +#include + +#define ldump_c +#define LUA_CORE + +#include "lua.h" + +#include "lobject.h" +#include "lstate.h" +#include "lundump.h" + +typedef struct { + lua_State* L; + lua_Writer writer; + void* data; + int strip; + int status; +} DumpState; + +#define DumpMem(b,n,size,D) DumpBlock(b,(n)*(size),D) +#define DumpVar(x,D) DumpMem(&x,1,sizeof(x),D) + +static void DumpBlock(const void* b, size_t size, DumpState* D) +{ + if (D->status==0) + { + lua_unlock(D->L); + 
D->status=(*D->writer)(D->L,b,size,D->data);
+  lua_lock(D->L);
+ }
+}
+
+static void DumpChar(int y, DumpState* D)
+{
+ char x=(char)y;
+ DumpVar(x,D);
+}
+
+static void DumpInt(int x, DumpState* D)
+{
+ DumpVar(x,D);
+}
+
+static void DumpNumber(lua_Number x, DumpState* D)
+{
+ DumpVar(x,D);
+}
+
+static void DumpVector(const void* b, int n, size_t size, DumpState* D)
+{
+ DumpInt(n,D);
+ DumpMem(b,n,size,D);
+}
+
+static void DumpString(const TString* s, DumpState* D)
+{
+ if (s==NULL || getstr(s)==NULL)
+ {
+  size_t size=0;
+  DumpVar(size,D);
+ }
+ else
+ {
+  size_t size=s->tsv.len+1;  /* include trailing '\0' */
+  DumpVar(size,D);
+  DumpBlock(getstr(s),size,D);
+ }
+}
+
+#define DumpCode(f,D) DumpVector(f->code,f->sizecode,sizeof(Instruction),D)
+
+static void DumpFunction(const Proto* f, const TString* p, DumpState* D);
+
+static void DumpConstants(const Proto* f, DumpState* D)
+{
+ int i,n=f->sizek;
+ DumpInt(n,D);
+ for (i=0; i<n; i++)
+ {
+  const TValue* o=&f->k[i];
+  DumpChar(ttype(o),D);
+  switch (ttype(o))
+  {
+   case LUA_TNIL:
+    break;
+   case LUA_TBOOLEAN:
+    DumpChar(bvalue(o),D);
+    break;
+   case LUA_TNUMBER:
+    DumpNumber(nvalue(o),D);
+    break;
+   case LUA_TSTRING:
+    DumpString(rawtsvalue(o),D);
+    break;
+   default:
+    lua_assert(0);  /* cannot happen */
+    break;
+  }
+ }
+ n=f->sizep;
+ DumpInt(n,D);
+ for (i=0; i<n; i++) DumpFunction(f->p[i],f->source,D);
+}
+
+static void DumpDebug(const Proto* f, DumpState* D)
+{
+ int i,n;
+ n= (D->strip) ? 0 : f->sizelineinfo;
+ DumpVector(f->lineinfo,n,sizeof(int),D);
+ n= (D->strip) ? 0 : f->sizelocvars;
+ DumpInt(n,D);
+ for (i=0; i<n; i++)
+ {
+  DumpString(f->locvars[i].varname,D);
+  DumpInt(f->locvars[i].startpc,D);
+  DumpInt(f->locvars[i].endpc,D);
+ }
+ n= (D->strip) ? 0 : f->sizeupvalues;
+ DumpInt(n,D);
+ for (i=0; i<n; i++) DumpString(f->upvalues[i],D);
+}
+
+static void DumpFunction(const Proto* f, const TString* p, DumpState* D)
+{
+ DumpString((f->source==p || D->strip) ? 
NULL : f->source,D); + DumpInt(f->linedefined,D); + DumpInt(f->lastlinedefined,D); + DumpChar(f->nups,D); + DumpChar(f->numparams,D); + DumpChar(f->is_vararg,D); + DumpChar(f->maxstacksize,D); + DumpCode(f,D); + DumpConstants(f,D); + DumpDebug(f,D); +} + +static void DumpHeader(DumpState* D) +{ + char h[LUAC_HEADERSIZE]; + luaU_header(h); + DumpBlock(h,LUAC_HEADERSIZE,D); +} + +/* +** dump Lua function as precompiled chunk +*/ +int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip) +{ + DumpState D; + D.L=L; + D.writer=w; + D.data=data; + D.strip=strip; + D.status=0; + DumpHeader(&D); + DumpFunction(f,NULL,&D); + return D.status; +} diff --git a/deps/lua/src/lfunc.c b/deps/lua/src/lfunc.c new file mode 100644 index 0000000000000000000000000000000000000000..b8cd67b267221f0eea30e40aaf7f2a5bd9ae44fc --- /dev/null +++ b/deps/lua/src/lfunc.c @@ -0,0 +1,174 @@ +/* +** $Id: lfunc.c,v 2.12 2005/12/22 16:19:56 roberto Exp $ +** Auxiliary functions to manipulate prototypes and closures +** See Copyright Notice in lua.h +*/ + + +#include + +#define lfunc_c +#define LUA_CORE + +#include "lua.h" + +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" + + + +Closure *luaF_newCclosure (lua_State *L, int nelems, Table *e) { + Closure *c = cast(Closure *, luaM_malloc(L, sizeCclosure(nelems))); + luaC_link(L, obj2gco(c), LUA_TFUNCTION); + c->c.isC = 1; + c->c.env = e; + c->c.nupvalues = cast_byte(nelems); + return c; +} + + +Closure *luaF_newLclosure (lua_State *L, int nelems, Table *e) { + Closure *c = cast(Closure *, luaM_malloc(L, sizeLclosure(nelems))); + luaC_link(L, obj2gco(c), LUA_TFUNCTION); + c->l.isC = 0; + c->l.env = e; + c->l.nupvalues = cast_byte(nelems); + while (nelems--) c->l.upvals[nelems] = NULL; + return c; +} + + +UpVal *luaF_newupval (lua_State *L) { + UpVal *uv = luaM_new(L, UpVal); + luaC_link(L, obj2gco(uv), LUA_TUPVAL); + uv->v = &uv->u.value; + setnilvalue(uv->v); + return uv; +} + + +UpVal *luaF_findupval (lua_State *L, StkId level) { + global_State *g = G(L); + GCObject **pp = &L->openupval; + UpVal *p; + UpVal *uv; + while ((p = ngcotouv(*pp)) != NULL && p->v >= level) { + lua_assert(p->v != &p->u.value); + if (p->v == level) { /* found a corresponding upvalue? */ + if (isdead(g, obj2gco(p))) /* is it dead? */ + changewhite(obj2gco(p)); /* ressurect it */ + return p; + } + pp = &p->next; + } + uv = luaM_new(L, UpVal); /* not found: create a new one */ + uv->tt = LUA_TUPVAL; + uv->marked = luaC_white(g); + uv->v = level; /* current value lives in the stack */ + uv->next = *pp; /* chain it in the proper position */ + *pp = obj2gco(uv); + uv->u.l.prev = &g->uvhead; /* double link it in `uvhead' list */ + uv->u.l.next = g->uvhead.u.l.next; + uv->u.l.next->u.l.prev = uv; + g->uvhead.u.l.next = uv; + lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv); + return uv; +} + + +static void unlinkupval (UpVal *uv) { + lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv); + uv->u.l.next->u.l.prev = uv->u.l.prev; /* remove from `uvhead' list */ + uv->u.l.prev->u.l.next = uv->u.l.next; +} + + +void luaF_freeupval (lua_State *L, UpVal *uv) { + if (uv->v != &uv->u.value) /* is it open? 
*/
+    unlinkupval(uv);  /* remove from open list */
+  luaM_free(L, uv);  /* free upvalue */
+}
+
+
+void luaF_close (lua_State *L, StkId level) {
+  UpVal *uv;
+  global_State *g = G(L);
+  while ((uv = ngcotouv(L->openupval)) != NULL && uv->v >= level) {
+    GCObject *o = obj2gco(uv);
+    lua_assert(!isblack(o) && uv->v != &uv->u.value);
+    L->openupval = uv->next;  /* remove from `open' list */
+    if (isdead(g, o))
+      luaF_freeupval(L, uv);  /* free upvalue */
+    else {
+      unlinkupval(uv);
+      setobj(L, &uv->u.value, uv->v);
+      uv->v = &uv->u.value;  /* now current value lives here */
+      luaC_linkupval(L, uv);  /* link upvalue into `gcroot' list */
+    }
+  }
+}
+
+
+Proto *luaF_newproto (lua_State *L) {
+  Proto *f = luaM_new(L, Proto);
+  luaC_link(L, obj2gco(f), LUA_TPROTO);
+  f->k = NULL;
+  f->sizek = 0;
+  f->p = NULL;
+  f->sizep = 0;
+  f->code = NULL;
+  f->sizecode = 0;
+  f->sizelineinfo = 0;
+  f->sizeupvalues = 0;
+  f->nups = 0;
+  f->upvalues = NULL;
+  f->numparams = 0;
+  f->is_vararg = 0;
+  f->maxstacksize = 0;
+  f->lineinfo = NULL;
+  f->sizelocvars = 0;
+  f->locvars = NULL;
+  f->linedefined = 0;
+  f->lastlinedefined = 0;
+  f->source = NULL;
+  return f;
+}
+
+
+void luaF_freeproto (lua_State *L, Proto *f) {
+  luaM_freearray(L, f->code, f->sizecode, Instruction);
+  luaM_freearray(L, f->p, f->sizep, Proto *);
+  luaM_freearray(L, f->k, f->sizek, TValue);
+  luaM_freearray(L, f->lineinfo, f->sizelineinfo, int);
+  luaM_freearray(L, f->locvars, f->sizelocvars, struct LocVar);
+  luaM_freearray(L, f->upvalues, f->sizeupvalues, TString *);
+  luaM_free(L, f);
+}
+
+
+void luaF_freeclosure (lua_State *L, Closure *c) {
+  int size = (c->c.isC) ? sizeCclosure(c->c.nupvalues) :
+                          sizeLclosure(c->l.nupvalues);
+  luaM_freemem(L, c, size);
+}
+
+
+/*
+** Look for n-th local variable at line `line' in function `func'.
+** Returns NULL if not found.
+*/
+const char *luaF_getlocalname (const Proto *f, int local_number, int pc) {
+  int i;
+  for (i = 0; i<f->sizelocvars && f->locvars[i].startpc <= pc; i++) {
+    if (pc < f->locvars[i].endpc) {  /* is variable active? 
*/ + local_number--; + if (local_number == 0) + return getstr(f->locvars[i].varname); + } + } + return NULL; /* not found */ +} + diff --git a/deps/lua/src/lfunc.h b/deps/lua/src/lfunc.h new file mode 100644 index 0000000000000000000000000000000000000000..2e02419bd6bc3f3efd67a007dfbb40f3f3300459 --- /dev/null +++ b/deps/lua/src/lfunc.h @@ -0,0 +1,34 @@ +/* +** $Id: lfunc.h,v 2.4 2005/04/25 19:24:10 roberto Exp $ +** Auxiliary functions to manipulate prototypes and closures +** See Copyright Notice in lua.h +*/ + +#ifndef lfunc_h +#define lfunc_h + + +#include "lobject.h" + + +#define sizeCclosure(n) (cast(int, sizeof(CClosure)) + \ + cast(int, sizeof(TValue)*((n)-1))) + +#define sizeLclosure(n) (cast(int, sizeof(LClosure)) + \ + cast(int, sizeof(TValue *)*((n)-1))) + + +LUAI_FUNC Proto *luaF_newproto (lua_State *L); +LUAI_FUNC Closure *luaF_newCclosure (lua_State *L, int nelems, Table *e); +LUAI_FUNC Closure *luaF_newLclosure (lua_State *L, int nelems, Table *e); +LUAI_FUNC UpVal *luaF_newupval (lua_State *L); +LUAI_FUNC UpVal *luaF_findupval (lua_State *L, StkId level); +LUAI_FUNC void luaF_close (lua_State *L, StkId level); +LUAI_FUNC void luaF_freeproto (lua_State *L, Proto *f); +LUAI_FUNC void luaF_freeclosure (lua_State *L, Closure *c); +LUAI_FUNC void luaF_freeupval (lua_State *L, UpVal *uv); +LUAI_FUNC const char *luaF_getlocalname (const Proto *func, int local_number, + int pc); + + +#endif diff --git a/deps/lua/src/lgc.c b/deps/lua/src/lgc.c new file mode 100644 index 0000000000000000000000000000000000000000..691565db49ff3f95bbc121cb475e6ce669d825ba --- /dev/null +++ b/deps/lua/src/lgc.c @@ -0,0 +1,707 @@ +/* +** $Id: lgc.c,v 2.37 2005/12/22 16:19:56 roberto Exp $ +** Garbage Collector +** See Copyright Notice in lua.h +*/ + +#include + +#define lgc_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" + + +#define GCSTEPSIZE 1024u +#define GCSWEEPMAX 40 +#define GCSWEEPCOST 10 +#define GCFINALIZECOST 100 + + +#define maskmarks cast_byte(~(bitmask(BLACKBIT)|WHITEBITS)) + +#define makewhite(g,x) \ + ((x)->gch.marked = cast_byte(((x)->gch.marked & maskmarks) | luaC_white(g))) + +#define white2gray(x) reset2bits((x)->gch.marked, WHITE0BIT, WHITE1BIT) +#define black2gray(x) resetbit((x)->gch.marked, BLACKBIT) + +#define stringmark(s) reset2bits((s)->tsv.marked, WHITE0BIT, WHITE1BIT) + + +#define isfinalized(u) testbit((u)->marked, FINALIZEDBIT) +#define markfinalized(u) l_setbit((u)->marked, FINALIZEDBIT) + + +#define KEYWEAK bitmask(KEYWEAKBIT) +#define VALUEWEAK bitmask(VALUEWEAKBIT) + + + +#define markvalue(g,o) { checkconsistency(o); \ + if (iscollectable(o) && iswhite(gcvalue(o))) reallymarkobject(g,gcvalue(o)); } + +#define markobject(g,t) { if (iswhite(obj2gco(t))) \ + reallymarkobject(g, obj2gco(t)); } + + +#define setthreshold(g) (g->GCthreshold = (g->estimate/100) * g->gcpause) + + +static void removeentry (Node *n) { + lua_assert(ttisnil(gval(n))); + if (iscollectable(gkey(n))) + setttype(gkey(n), LUA_TDEADKEY); /* dead key; remove it */ +} + + +static void reallymarkobject (global_State *g, GCObject *o) { + lua_assert(iswhite(o) && !isdead(g, o)); + white2gray(o); + switch (o->gch.tt) { + case LUA_TSTRING: { + return; + } + case LUA_TUSERDATA: { + Table *mt = gco2u(o)->metatable; + gray2black(o); /* udata are never gray */ + if (mt) markobject(g, mt); + markobject(g, gco2u(o)->env); + 
return; + } + case LUA_TUPVAL: { + UpVal *uv = gco2uv(o); + markvalue(g, uv->v); + if (uv->v == &uv->u.value) /* closed? */ + gray2black(o); /* open upvalues are never black */ + return; + } + case LUA_TFUNCTION: { + gco2cl(o)->c.gclist = g->gray; + g->gray = o; + break; + } + case LUA_TTABLE: { + gco2h(o)->gclist = g->gray; + g->gray = o; + break; + } + case LUA_TTHREAD: { + gco2th(o)->gclist = g->gray; + g->gray = o; + break; + } + case LUA_TPROTO: { + gco2p(o)->gclist = g->gray; + g->gray = o; + break; + } + default: lua_assert(0); + } +} + + +static void marktmu (global_State *g) { + GCObject *u = g->tmudata; + if (u) { + do { + u = u->gch.next; + makewhite(g, u); /* may be marked, if left from previous GC */ + reallymarkobject(g, u); + } while (u != g->tmudata); + } +} + + +/* move `dead' udata that need finalization to list `tmudata' */ +size_t luaC_separateudata (lua_State *L, int all) { + global_State *g = G(L); + size_t deadmem = 0; + GCObject **p = &g->mainthread->next; + GCObject *curr; + while ((curr = *p) != NULL) { + if (!(iswhite(curr) || all) || isfinalized(gco2u(curr))) + p = &curr->gch.next; /* don't bother with them */ + else if (fasttm(L, gco2u(curr)->metatable, TM_GC) == NULL) { + markfinalized(gco2u(curr)); /* don't need finalization */ + p = &curr->gch.next; + } + else { /* must call its gc method */ + deadmem += sizeudata(gco2u(curr)); + markfinalized(gco2u(curr)); + *p = curr->gch.next; + /* link `curr' at the end of `tmudata' list */ + if (g->tmudata == NULL) /* list is empty? */ + g->tmudata = curr->gch.next = curr; /* creates a circular list */ + else { + curr->gch.next = g->tmudata->gch.next; + g->tmudata->gch.next = curr; + g->tmudata = curr; + } + } + } + return deadmem; +} + + +static int traversetable (global_State *g, Table *h) { + int i; + int weakkey = 0; + int weakvalue = 0; + const TValue *mode; + if (h->metatable) + markobject(g, h->metatable); + mode = gfasttm(g, h->metatable, TM_MODE); + if (mode && ttisstring(mode)) { /* is there a weak mode? */ + weakkey = (strchr(svalue(mode), 'k') != NULL); + weakvalue = (strchr(svalue(mode), 'v') != NULL); + if (weakkey || weakvalue) { /* is really weak? */ + h->marked &= ~(KEYWEAK | VALUEWEAK); /* clear bits */ + h->marked |= cast_byte((weakkey << KEYWEAKBIT) | + (weakvalue << VALUEWEAKBIT)); + h->gclist = g->weak; /* must be cleared after GC, ... */ + g->weak = obj2gco(h); /* ... 
so put in the appropriate list */
+    }
+  }
+  if (weakkey && weakvalue) return 1;
+  if (!weakvalue) {
+    i = h->sizearray;
+    while (i--)
+      markvalue(g, &h->array[i]);
+  }
+  i = sizenode(h);
+  while (i--) {
+    Node *n = gnode(h, i);
+    lua_assert(ttype(gkey(n)) != LUA_TDEADKEY || ttisnil(gval(n)));
+    if (ttisnil(gval(n)))
+      removeentry(n);  /* remove empty entries */
+    else {
+      lua_assert(!ttisnil(gkey(n)));
+      if (!weakkey) markvalue(g, gkey(n));
+      if (!weakvalue) markvalue(g, gval(n));
+    }
+  }
+  return weakkey || weakvalue;
+}
+
+
+/*
+** All marks are conditional because a GC may happen while the
+** prototype is still being created
+*/
+static void traverseproto (global_State *g, Proto *f) {
+  int i;
+  if (f->source) stringmark(f->source);
+  for (i=0; i<f->sizek; i++)  /* mark literals */
+    markvalue(g, &f->k[i]);
+  for (i=0; i<f->sizeupvalues; i++) {  /* mark upvalue names */
+    if (f->upvalues[i])
+      stringmark(f->upvalues[i]);
+  }
+  for (i=0; i<f->sizep; i++) {  /* mark nested protos */
+    if (f->p[i])
+      markobject(g, f->p[i]);
+  }
+  for (i=0; i<f->sizelocvars; i++) {  /* mark local-variable names */
+    if (f->locvars[i].varname)
+      stringmark(f->locvars[i].varname);
+  }
+}
+
+
+
+static void traverseclosure (global_State *g, Closure *cl) {
+  markobject(g, cl->c.env);
+  if (cl->c.isC) {
+    int i;
+    for (i=0; i<cl->c.nupvalues; i++)  /* mark its upvalues */
+      markvalue(g, &cl->c.upvalue[i]);
+  }
+  else {
+    int i;
+    lua_assert(cl->l.nupvalues == cl->l.p->nups);
+    markobject(g, cl->l.p);
+    for (i=0; i<cl->l.nupvalues; i++)  /* mark its upvalues */
+      markobject(g, cl->l.upvals[i]);
+  }
+}
+
+
+static void checkstacksizes (lua_State *L, StkId max) {
+  int ci_used = cast_int(L->ci - L->base_ci);  /* number of `ci' in use */
+  int s_used = cast_int(max - L->stack);  /* part of stack in use */
+  if (L->size_ci > LUAI_MAXCALLS)  /* handling overflow? */
+    return;  /* do not touch the stacks */
+  if (4*ci_used < L->size_ci && 2*BASIC_CI_SIZE < L->size_ci)
+    luaD_reallocCI(L, L->size_ci/2);  /* still big enough... */
+  condhardstacktests(luaD_reallocCI(L, ci_used + 1));
+  if (4*s_used < L->stacksize &&
+      2*(BASIC_STACK_SIZE+EXTRA_STACK) < L->stacksize)
+    luaD_reallocstack(L, L->stacksize/2);  /* still big enough... */
+  condhardstacktests(luaD_reallocstack(L, s_used));
+}
+
+
+static void traversestack (global_State *g, lua_State *l) {
+  StkId o, lim;
+  CallInfo *ci;
+  markvalue(g, gt(l));
+  lim = l->top;
+  for (ci = l->base_ci; ci <= l->ci; ci++) {
+    lua_assert(ci->top <= l->stack_last);
+    if (lim < ci->top) lim = ci->top;
+  }
+  for (o = l->stack; o < l->top; o++)
+    markvalue(g, o);
+  for (; o <= lim; o++)
+    setnilvalue(o);
+  checkstacksizes(l, lim);
+}
+
+
+/*
+** traverse one gray object, turning it to black.
+** Returns `quantity' traversed.
+*/
+static l_mem propagatemark (global_State *g) {
+  GCObject *o = g->gray;
+  lua_assert(isgray(o));
+  gray2black(o);
+  switch (o->gch.tt) {
+    case LUA_TTABLE: {
+      Table *h = gco2h(o);
+      g->gray = h->gclist;
+      if (traversetable(g, h))  /* table is weak? */
+        black2gray(o);  /* keep it gray */
+      return sizeof(Table) + sizeof(TValue) * h->sizearray +
+                             sizeof(Node) * sizenode(h);
+    }
+    case LUA_TFUNCTION: {
+      Closure *cl = gco2cl(o);
+      g->gray = cl->c.gclist;
+      traverseclosure(g, cl);
+      return (cl->c.isC) ? 
sizeCclosure(cl->c.nupvalues) : + sizeLclosure(cl->l.nupvalues); + } + case LUA_TTHREAD: { + lua_State *th = gco2th(o); + g->gray = th->gclist; + th->gclist = g->grayagain; + g->grayagain = o; + black2gray(o); + traversestack(g, th); + return sizeof(lua_State) + sizeof(TValue) * th->stacksize + + sizeof(CallInfo) * th->size_ci; + } + case LUA_TPROTO: { + Proto *p = gco2p(o); + g->gray = p->gclist; + traverseproto(g, p); + return sizeof(Proto) + sizeof(Instruction) * p->sizecode + + sizeof(Proto *) * p->sizep + + sizeof(TValue) * p->sizek + + sizeof(int) * p->sizelineinfo + + sizeof(LocVar) * p->sizelocvars + + sizeof(TString *) * p->sizeupvalues; + } + default: lua_assert(0); return 0; + } +} + + +static void propagateall (global_State *g) { + while (g->gray) propagatemark(g); +} + + +/* +** The next function tells whether a key or value can be cleared from +** a weak table. Non-collectable objects are never removed from weak +** tables. Strings behave as `values', so are never removed too. for +** other objects: if really collected, cannot keep them; for userdata +** being finalized, keep them in keys, but not in values +*/ +static int iscleared (const TValue *o, int iskey) { + if (!iscollectable(o)) return 0; + if (ttisstring(o)) { + stringmark(rawtsvalue(o)); /* strings are `values', so are never weak */ + return 0; + } + return iswhite(gcvalue(o)) || + (ttisuserdata(o) && (!iskey && isfinalized(uvalue(o)))); +} + + +/* +** clear collected entries from weaktables +*/ +static void cleartable (GCObject *l) { + while (l) { + Table *h = gco2h(l); + int i = h->sizearray; + lua_assert(testbit(h->marked, VALUEWEAKBIT) || + testbit(h->marked, KEYWEAKBIT)); + if (testbit(h->marked, VALUEWEAKBIT)) { + while (i--) { + TValue *o = &h->array[i]; + if (iscleared(o, 0)) /* value was collected? */ + setnilvalue(o); /* remove value */ + } + } + i = sizenode(h); + while (i--) { + Node *n = gnode(h, i); + if (!ttisnil(gval(n)) && /* non-empty entry? */ + (iscleared(key2tval(n), 1) || iscleared(gval(n), 0))) { + setnilvalue(gval(n)); /* remove value ... */ + removeentry(n); /* remove entry from table */ + } + } + l = h->gclist; + } +} + + +static void freeobj (lua_State *L, GCObject *o) { + switch (o->gch.tt) { + case LUA_TPROTO: luaF_freeproto(L, gco2p(o)); break; + case LUA_TFUNCTION: luaF_freeclosure(L, gco2cl(o)); break; + case LUA_TUPVAL: luaF_freeupval(L, gco2uv(o)); break; + case LUA_TTABLE: luaH_free(L, gco2h(o)); break; + case LUA_TTHREAD: { + lua_assert(gco2th(o) != L && gco2th(o) != G(L)->mainthread); + luaE_freethread(L, gco2th(o)); + break; + } + case LUA_TSTRING: { + G(L)->strt.nuse--; + luaM_freemem(L, o, sizestring(gco2ts(o))); + break; + } + case LUA_TUSERDATA: { + luaM_freemem(L, o, sizeudata(gco2u(o))); + break; + } + default: lua_assert(0); + } +} + + + +#define sweepwholelist(L,p) sweeplist(L,p,MAX_LUMEM) + + +static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count) { + GCObject *curr; + global_State *g = G(L); + int deadmask = otherwhite(g); + while ((curr = *p) != NULL && count-- > 0) { + if (curr->gch.tt == LUA_TTHREAD) /* sweep open upvalues of each thread */ + sweepwholelist(L, &gco2th(curr)->openupval); + if ((curr->gch.marked ^ WHITEBITS) & deadmask) { /* not dead? 
*/
+      lua_assert(!isdead(g, curr) || testbit(curr->gch.marked, FIXEDBIT));
+      makewhite(g, curr);  /* make it white (for next cycle) */
+      p = &curr->gch.next;
+    }
+    else {  /* must erase `curr' */
+      lua_assert(isdead(g, curr) || deadmask == bitmask(SFIXEDBIT));
+      *p = curr->gch.next;
+      if (curr == g->rootgc)  /* is the first element of the list? */
+        g->rootgc = curr->gch.next;  /* adjust first */
+      freeobj(L, curr);
+    }
+  }
+  return p;
+}
+
+
+static void checkSizes (lua_State *L) {
+  global_State *g = G(L);
+  /* check size of string hash */
+  if (g->strt.nuse < cast(lu_int32, g->strt.size/4) &&
+      g->strt.size > MINSTRTABSIZE*2)
+    luaS_resize(L, g->strt.size/2);  /* table is too big */
+  /* check size of buffer */
+  if (luaZ_sizebuffer(&g->buff) > LUA_MINBUFFER*2) {  /* buffer too big? */
+    size_t newsize = luaZ_sizebuffer(&g->buff) / 2;
+    luaZ_resizebuffer(L, &g->buff, newsize);
+  }
+}
+
+
+static void GCTM (lua_State *L) {
+  global_State *g = G(L);
+  GCObject *o = g->tmudata->gch.next;  /* get first element */
+  Udata *udata = rawgco2u(o);
+  const TValue *tm;
+  /* remove udata from `tmudata' */
+  if (o == g->tmudata)  /* last element? */
+    g->tmudata = NULL;
+  else
+    g->tmudata->gch.next = udata->uv.next;
+  udata->uv.next = g->mainthread->next;  /* return it to `root' list */
+  g->mainthread->next = o;
+  makewhite(g, o);
+  tm = fasttm(L, udata->uv.metatable, TM_GC);
+  if (tm != NULL) {
+    lu_byte oldah = L->allowhook;
+    lu_mem oldt = g->GCthreshold;
+    L->allowhook = 0;  /* stop debug hooks during GC tag method */
+    g->GCthreshold = 2*g->totalbytes;  /* avoid GC steps */
+    setobj2s(L, L->top, tm);
+    setuvalue(L, L->top+1, udata);
+    L->top += 2;
+    luaD_call(L, L->top - 2, 0);
+    L->allowhook = oldah;  /* restore hooks */
+    g->GCthreshold = oldt;  /* restore threshold */
+  }
+}
+
+
+/*
+** Call all GC tag methods
+*/
+void luaC_callGCTM (lua_State *L) {
+  while (G(L)->tmudata)
+    GCTM(L);
+}
+
+
+void luaC_freeall (lua_State *L) {
+  global_State *g = G(L);
+  int i;
+  g->currentwhite = WHITEBITS | bitmask(SFIXEDBIT);  /* mask to collect all elements */
+  sweepwholelist(L, &g->rootgc);
+  for (i = 0; i < g->strt.size; i++)  /* free all string lists */
+    sweepwholelist(L, &g->strt.hash[i]);
+}
+
+
+static void markmt (global_State *g) {
+  int i;
+  for (i=0; i<NUM_TAGS; i++)
+    if (g->mt[i]) markobject(g, g->mt[i]);
+}
+
+
+/* mark root set */
+static void markroot (lua_State *L) {
+  global_State *g = G(L);
+  g->gray = NULL;
+  g->grayagain = NULL;
+  g->weak = NULL;
+  markobject(g, g->mainthread);
+  /* make global table be traversed before main stack */
+  markvalue(g, gt(g->mainthread));
+  markvalue(g, registry(L));
+  markmt(g);
+  g->gcstate = GCSpropagate;
+}
+
+
+static void remarkupvals (global_State *g) {
+  UpVal *uv;
+  for (uv = g->uvhead.u.l.next; uv != &g->uvhead; uv = uv->u.l.next) {
+    lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv);
+    if (isgray(obj2gco(uv)))
+      markvalue(g, uv->v);
+  }
+}
+
+
+static void atomic (lua_State *L) {
+  global_State *g = G(L);
+  size_t udsize;  /* total size of userdata to be finalized */
+  /* remark occasional upvalues of (maybe) dead threads */
+  remarkupvals(g);
+  /* traverse objects cautch by write barrier and by 'remarkupvals' */
+  propagateall(g);
+  /* remark weak tables */
+  g->gray = g->weak;
+  g->weak = NULL;
+  lua_assert(!iswhite(obj2gco(g->mainthread)));
+  markobject(g, L);  /* mark running thread */
+  markmt(g);  /* mark basic metatables (again) */
+  propagateall(g);
+  /* remark gray again */
+  g->gray = g->grayagain;
+  g->grayagain = NULL;
+  propagateall(g);
+  udsize = 
luaC_separateudata(L, 0); /* separate userdata to be finalized */ + marktmu(g); /* mark `preserved' userdata */ + propagateall(g); /* remark, to propagate `preserveness' */ + cleartable(g->weak); /* remove collected objects from weak tables */ + /* flip current white */ + g->currentwhite = cast_byte(otherwhite(g)); + g->sweepstrgc = 0; + g->sweepgc = &g->rootgc; + g->gcstate = GCSsweepstring; + g->estimate = g->totalbytes - udsize; /* first estimate */ +} + + +static l_mem singlestep (lua_State *L) { + global_State *g = G(L); + /*lua_checkmemory(L);*/ + switch (g->gcstate) { + case GCSpause: { + markroot(L); /* start a new collection */ + return 0; + } + case GCSpropagate: { + if (g->gray) + return propagatemark(g); + else { /* no more `gray' objects */ + atomic(L); /* finish mark phase */ + return 0; + } + } + case GCSsweepstring: { + lu_mem old = g->totalbytes; + sweepwholelist(L, &g->strt.hash[g->sweepstrgc++]); + if (g->sweepstrgc >= g->strt.size) /* nothing more to sweep? */ + g->gcstate = GCSsweep; /* end sweep-string phase */ + lua_assert(old >= g->totalbytes); + g->estimate -= old - g->totalbytes; + return GCSWEEPCOST; + } + case GCSsweep: { + lu_mem old = g->totalbytes; + g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX); + if (*g->sweepgc == NULL) { /* nothing more to sweep? */ + checkSizes(L); + g->gcstate = GCSfinalize; /* end sweep phase */ + } + lua_assert(old >= g->totalbytes); + g->estimate -= old - g->totalbytes; + return GCSWEEPMAX*GCSWEEPCOST; + } + case GCSfinalize: { + if (g->tmudata) { + GCTM(L); + return GCFINALIZECOST; + } + else { + g->gcstate = GCSpause; /* end collection */ + g->gcdept = 0; + return 0; + } + } + default: lua_assert(0); return 0; + } +} + + +void luaC_step (lua_State *L) { + global_State *g = G(L); + l_mem lim = (GCSTEPSIZE/100) * g->gcstepmul; + if (lim == 0) + lim = (MAX_LUMEM-1)/2; /* no limit */ + g->gcdept += g->totalbytes - g->GCthreshold; + do { + lim -= singlestep(L); + if (g->gcstate == GCSpause) + break; + } while (lim > 0); + if (g->gcstate != GCSpause) { + if (g->gcdept < GCSTEPSIZE) + g->GCthreshold = g->totalbytes + GCSTEPSIZE; /* - lim/g->gcstepmul;*/ + else { + g->gcdept -= GCSTEPSIZE; + g->GCthreshold = g->totalbytes; + } + } + else { + lua_assert(g->totalbytes >= g->estimate); + setthreshold(g); + } +} + + +void luaC_fullgc (lua_State *L) { + global_State *g = G(L); + if (g->gcstate <= GCSpropagate) { + /* reset sweep marks to sweep all elements (returning them to white) */ + g->sweepstrgc = 0; + g->sweepgc = &g->rootgc; + /* reset other collector lists */ + g->gray = NULL; + g->grayagain = NULL; + g->weak = NULL; + g->gcstate = GCSsweepstring; + } + lua_assert(g->gcstate != GCSpause && g->gcstate != GCSpropagate); + /* finish any pending sweep phase */ + while (g->gcstate != GCSfinalize) { + lua_assert(g->gcstate == GCSsweepstring || g->gcstate == GCSsweep); + singlestep(L); + } + markroot(L); + while (g->gcstate != GCSpause) { + singlestep(L); + } + setthreshold(g); +} + + +void luaC_barrierf (lua_State *L, GCObject *o, GCObject *v) { + global_State *g = G(L); + lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); + lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause); + lua_assert(ttype(&o->gch) != LUA_TTABLE); + /* must keep invariant? 
*/ + if (g->gcstate == GCSpropagate) + reallymarkobject(g, v); /* restore invariant */ + else /* don't mind */ + makewhite(g, o); /* mark as white just to avoid other barriers */ +} + + +void luaC_barrierback (lua_State *L, Table *t) { + global_State *g = G(L); + GCObject *o = obj2gco(t); + lua_assert(isblack(o) && !isdead(g, o)); + lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause); + black2gray(o); /* make table gray (again) */ + t->gclist = g->grayagain; + g->grayagain = o; +} + + +void luaC_link (lua_State *L, GCObject *o, lu_byte tt) { + global_State *g = G(L); + o->gch.next = g->rootgc; + g->rootgc = o; + o->gch.marked = luaC_white(g); + o->gch.tt = tt; +} + + +void luaC_linkupval (lua_State *L, UpVal *uv) { + global_State *g = G(L); + GCObject *o = obj2gco(uv); + o->gch.next = g->rootgc; /* link upvalue into `rootgc' list */ + g->rootgc = o; + if (isgray(o)) { + if (g->gcstate == GCSpropagate) { + gray2black(o); /* closed upvalues need barrier */ + luaC_barrier(L, uv, uv->v); + } + else { /* sweep phase: sweep it (turning it into white) */ + makewhite(g, o); + lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause); + } + } +} + diff --git a/deps/lua/src/lgc.h b/deps/lua/src/lgc.h new file mode 100644 index 0000000000000000000000000000000000000000..5f69acb1e3b8ecf2717e8f0acc687a6527b8d20f --- /dev/null +++ b/deps/lua/src/lgc.h @@ -0,0 +1,110 @@ +/* +** $Id: lgc.h,v 2.15 2005/08/24 16:15:49 roberto Exp $ +** Garbage Collector +** See Copyright Notice in lua.h +*/ + +#ifndef lgc_h +#define lgc_h + + +#include "lobject.h" + + +/* +** Possible states of the Garbage Collector +*/ +#define GCSpause 0 +#define GCSpropagate 1 +#define GCSsweepstring 2 +#define GCSsweep 3 +#define GCSfinalize 4 + + +/* +** some userful bit tricks +*/ +#define resetbits(x,m) ((x) &= cast(lu_byte, ~(m))) +#define setbits(x,m) ((x) |= (m)) +#define testbits(x,m) ((x) & (m)) +#define bitmask(b) (1<<(b)) +#define bit2mask(b1,b2) (bitmask(b1) | bitmask(b2)) +#define l_setbit(x,b) setbits(x, bitmask(b)) +#define resetbit(x,b) resetbits(x, bitmask(b)) +#define testbit(x,b) testbits(x, bitmask(b)) +#define set2bits(x,b1,b2) setbits(x, (bit2mask(b1, b2))) +#define reset2bits(x,b1,b2) resetbits(x, (bit2mask(b1, b2))) +#define test2bits(x,b1,b2) testbits(x, (bit2mask(b1, b2))) + + + +/* +** Layout for bit use in `marked' field: +** bit 0 - object is white (type 0) +** bit 1 - object is white (type 1) +** bit 2 - object is black +** bit 3 - for userdata: has been finalized +** bit 3 - for tables: has weak keys +** bit 4 - for tables: has weak values +** bit 5 - object is fixed (should not be collected) +** bit 6 - object is "super" fixed (only the main thread) +*/ + + +#define WHITE0BIT 0 +#define WHITE1BIT 1 +#define BLACKBIT 2 +#define FINALIZEDBIT 3 +#define KEYWEAKBIT 3 +#define VALUEWEAKBIT 4 +#define FIXEDBIT 5 +#define SFIXEDBIT 6 +#define WHITEBITS bit2mask(WHITE0BIT, WHITE1BIT) + + +#define iswhite(x) test2bits((x)->gch.marked, WHITE0BIT, WHITE1BIT) +#define isblack(x) testbit((x)->gch.marked, BLACKBIT) +#define isgray(x) (!isblack(x) && !iswhite(x)) + +#define otherwhite(g) (g->currentwhite ^ WHITEBITS) +#define isdead(g,v) ((v)->gch.marked & otherwhite(g) & WHITEBITS) + +#define changewhite(x) ((x)->gch.marked ^= WHITEBITS) +#define gray2black(x) l_setbit((x)->gch.marked, BLACKBIT) + +#define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) + +#define luaC_white(g) cast(lu_byte, (g)->currentwhite & WHITEBITS) + + +#define luaC_checkGC(L) { \ + condhardstacktests(luaD_reallocstack(L, 
L->stacksize - EXTRA_STACK - 1)); \ + if (G(L)->totalbytes >= G(L)->GCthreshold) \ + luaC_step(L); } + + +#define luaC_barrier(L,p,v) { if (valiswhite(v) && isblack(obj2gco(p))) \ + luaC_barrierf(L,obj2gco(p),gcvalue(v)); } + +#define luaC_barriert(L,t,v) { if (valiswhite(v) && isblack(obj2gco(t))) \ + luaC_barrierback(L,t); } + +#define luaC_objbarrier(L,p,o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(p))) \ + luaC_barrierf(L,obj2gco(p),obj2gco(o)); } + +#define luaC_objbarriert(L,t,o) \ + { if (iswhite(obj2gco(o)) && isblack(obj2gco(t))) luaC_barrierback(L,t); } + +LUAI_FUNC size_t luaC_separateudata (lua_State *L, int all); +LUAI_FUNC void luaC_callGCTM (lua_State *L); +LUAI_FUNC void luaC_freeall (lua_State *L); +LUAI_FUNC void luaC_step (lua_State *L); +LUAI_FUNC void luaC_fullgc (lua_State *L); +LUAI_FUNC void luaC_link (lua_State *L, GCObject *o, lu_byte tt); +LUAI_FUNC void luaC_linkupval (lua_State *L, UpVal *uv); +LUAI_FUNC void luaC_barrierf (lua_State *L, GCObject *o, GCObject *v); +LUAI_FUNC void luaC_barrierback (lua_State *L, Table *t); + + +#endif diff --git a/deps/lua/src/linit.c b/deps/lua/src/linit.c new file mode 100644 index 0000000000000000000000000000000000000000..483d9c8c282e13eb129d2919adc71fafd8318a17 --- /dev/null +++ b/deps/lua/src/linit.c @@ -0,0 +1,38 @@ +/* +** $Id: linit.c,v 1.14 2005/12/29 15:32:11 roberto Exp $ +** Initialization of libraries for lua.c +** See Copyright Notice in lua.h +*/ + + +#define linit_c +#define LUA_LIB + +#include "lua.h" + +#include "lualib.h" +#include "lauxlib.h" + + +static const luaL_Reg lualibs[] = { + {"", luaopen_base}, + {LUA_LOADLIBNAME, luaopen_package}, + {LUA_TABLIBNAME, luaopen_table}, + {LUA_IOLIBNAME, luaopen_io}, + {LUA_OSLIBNAME, luaopen_os}, + {LUA_STRLIBNAME, luaopen_string}, + {LUA_MATHLIBNAME, luaopen_math}, + {LUA_DBLIBNAME, luaopen_debug}, + {NULL, NULL} +}; + + +LUALIB_API void luaL_openlibs (lua_State *L) { + const luaL_Reg *lib = lualibs; + for (; lib->func; lib++) { + lua_pushcfunction(L, lib->func); + lua_pushstring(L, lib->name); + lua_call(L, 1, 0); + } +} + diff --git a/deps/lua/src/liolib.c b/deps/lua/src/liolib.c new file mode 100644 index 0000000000000000000000000000000000000000..bb3b5194d939defaa8667e751dafa20771711287 --- /dev/null +++ b/deps/lua/src/liolib.c @@ -0,0 +1,532 @@ +/* +** $Id: liolib.c,v 2.72 2006/01/28 12:59:13 roberto Exp $ +** Standard I/O (and system) library +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include + +#define liolib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + +#define IO_INPUT 1 +#define IO_OUTPUT 2 + + +static const char *const fnames[] = {"input", "output"}; + + +static int pushresult (lua_State *L, int i, const char *filename) { + int en = errno; /* calls to Lua API may change this value */ + if (i) { + lua_pushboolean(L, 1); + return 1; + } + else { + lua_pushnil(L); + if (filename) + lua_pushfstring(L, "%s: %s", filename, strerror(en)); + else + lua_pushfstring(L, "%s", strerror(en)); + lua_pushinteger(L, en); + return 3; + } +} + + +static void fileerror (lua_State *L, int arg, const char *filename) { + lua_pushfstring(L, "%s: %s", filename, strerror(errno)); + luaL_argerror(L, arg, lua_tostring(L, -1)); +} + + +#define topfile(L) ((FILE **)luaL_checkudata(L, 1, LUA_FILEHANDLE)) + + +static int io_type (lua_State *L) { + void *ud; + luaL_checkany(L, 1); + ud = lua_touserdata(L, 1); + lua_getfield(L, LUA_REGISTRYINDEX, LUA_FILEHANDLE); + if (ud == NULL || !lua_getmetatable(L, 1) || 
!lua_rawequal(L, -2, -1)) + lua_pushnil(L); /* not a file */ + else if (*((FILE **)ud) == NULL) + lua_pushliteral(L, "closed file"); + else + lua_pushliteral(L, "file"); + return 1; +} + + +static FILE *tofile (lua_State *L) { + FILE **f = topfile(L); + if (*f == NULL) + luaL_error(L, "attempt to use a closed file"); + return *f; +} + + + +/* +** When creating file handles, always creates a `closed' file handle +** before opening the actual file; so, if there is a memory error, the +** file is not left opened. +*/ +static FILE **newfile (lua_State *L) { + FILE **pf = (FILE **)lua_newuserdata(L, sizeof(FILE *)); + *pf = NULL; /* file handle is currently `closed' */ + luaL_getmetatable(L, LUA_FILEHANDLE); + lua_setmetatable(L, -2); + return pf; +} + + +/* +** this function has a separated environment, which defines the +** correct __close for 'popen' files +*/ +static int io_pclose (lua_State *L) { + FILE **p = topfile(L); + int ok = lua_pclose(L, *p); + if (ok) *p = NULL; + return pushresult(L, ok, NULL); +} + + +static int io_fclose (lua_State *L) { + FILE **p = topfile(L); + int ok = (fclose(*p) == 0); + if (ok) *p = NULL; + return pushresult(L, ok, NULL); +} + + +static int aux_close (lua_State *L) { + lua_getfenv(L, 1); + lua_getfield(L, -1, "__close"); + return (lua_tocfunction(L, -1))(L); +} + + +static int io_close (lua_State *L) { + if (lua_isnone(L, 1)) + lua_rawgeti(L, LUA_ENVIRONINDEX, IO_OUTPUT); + tofile(L); /* make sure argument is a file */ + return aux_close(L); +} + + +static int io_gc (lua_State *L) { + FILE *f = *topfile(L); + /* ignore closed files and standard files */ + if (f != NULL && f != stdin && f != stdout && f != stderr) + aux_close(L); + return 0; +} + + +static int io_tostring (lua_State *L) { + FILE *f = *topfile(L); + if (f == NULL) + lua_pushstring(L, "file (closed)"); + else + lua_pushfstring(L, "file (%p)", f); + return 1; +} + + +static int io_open (lua_State *L) { + const char *filename = luaL_checkstring(L, 1); + const char *mode = luaL_optstring(L, 2, "r"); + FILE **pf = newfile(L); + *pf = fopen(filename, mode); + return (*pf == NULL) ? pushresult(L, 0, filename) : 1; +} + + +static int io_popen (lua_State *L) { + const char *filename = luaL_checkstring(L, 1); + const char *mode = luaL_optstring(L, 2, "r"); + FILE **pf = newfile(L); + *pf = lua_popen(L, filename, mode); + return (*pf == NULL) ? pushresult(L, 0, filename) : 1; +} + + +static int io_tmpfile (lua_State *L) { + FILE **pf = newfile(L); + *pf = tmpfile(); + return (*pf == NULL) ? 
pushresult(L, 0, NULL) : 1; +} + + +static FILE *getiofile (lua_State *L, int findex) { + FILE *f; + lua_rawgeti(L, LUA_ENVIRONINDEX, findex); + f = *(FILE **)lua_touserdata(L, -1); + if (f == NULL) + luaL_error(L, "standard %s file is closed", fnames[findex - 1]); + return f; +} + + +static int g_iofile (lua_State *L, int f, const char *mode) { + if (!lua_isnoneornil(L, 1)) { + const char *filename = lua_tostring(L, 1); + if (filename) { + FILE **pf = newfile(L); + *pf = fopen(filename, mode); + if (*pf == NULL) + fileerror(L, 1, filename); + } + else { + tofile(L); /* check that it's a valid file handle */ + lua_pushvalue(L, 1); + } + lua_rawseti(L, LUA_ENVIRONINDEX, f); + } + /* return current value */ + lua_rawgeti(L, LUA_ENVIRONINDEX, f); + return 1; +} + + +static int io_input (lua_State *L) { + return g_iofile(L, IO_INPUT, "r"); +} + + +static int io_output (lua_State *L) { + return g_iofile(L, IO_OUTPUT, "w"); +} + + +static int io_readline (lua_State *L); + + +static void aux_lines (lua_State *L, int idx, int toclose) { + lua_pushvalue(L, idx); + lua_pushboolean(L, toclose); /* close/not close file when finished */ + lua_pushcclosure(L, io_readline, 2); +} + + +static int f_lines (lua_State *L) { + tofile(L); /* check that it's a valid file handle */ + aux_lines(L, 1, 0); + return 1; +} + + +static int io_lines (lua_State *L) { + if (lua_isnoneornil(L, 1)) { /* no arguments? */ + /* will iterate over default input */ + lua_rawgeti(L, LUA_ENVIRONINDEX, IO_INPUT); + return f_lines(L); + } + else { + const char *filename = luaL_checkstring(L, 1); + FILE **pf = newfile(L); + *pf = fopen(filename, "r"); + if (*pf == NULL) + fileerror(L, 1, filename); + aux_lines(L, lua_gettop(L), 1); + return 1; + } +} + + +/* +** {====================================================== +** READ +** ======================================================= +*/ + + +static int read_number (lua_State *L, FILE *f) { + lua_Number d; + if (fscanf(f, LUA_NUMBER_SCAN, &d) == 1) { + lua_pushnumber(L, d); + return 1; + } + else return 0; /* read fails */ +} + + +static int test_eof (lua_State *L, FILE *f) { + int c = getc(f); + ungetc(c, f); + lua_pushlstring(L, NULL, 0); + return (c != EOF); +} + + +static int read_line (lua_State *L, FILE *f) { + luaL_Buffer b; + luaL_buffinit(L, &b); + for (;;) { + size_t l; + char *p = luaL_prepbuffer(&b); + if (fgets(p, LUAL_BUFFERSIZE, f) == NULL) { /* eof? */ + luaL_pushresult(&b); /* close buffer */ + return (lua_strlen(L, -1) > 0); /* check whether read something */ + } + l = strlen(p); + if (l == 0 || p[l-1] != '\n') + luaL_addsize(&b, l); + else { + luaL_addsize(&b, l - 1); /* do not include `eol' */ + luaL_pushresult(&b); /* close buffer */ + return 1; /* read at least an `eol' */ + } + } +} + + +static int read_chars (lua_State *L, FILE *f, size_t n) { + size_t rlen; /* how much to read */ + size_t nr; /* number of chars actually read */ + luaL_Buffer b; + luaL_buffinit(L, &b); + rlen = LUAL_BUFFERSIZE; /* try to read that much each time */ + do { + char *p = luaL_prepbuffer(&b); + if (rlen > n) rlen = n; /* cannot read more than asked */ + nr = fread(p, sizeof(char), rlen, f); + luaL_addsize(&b, nr); + n -= nr; /* still have to read `n' chars */ + } while (n > 0 && nr == rlen); /* until end of count or eof */ + luaL_pushresult(&b); /* close buffer */ + return (n == 0 || lua_strlen(L, -1) > 0); +} + + +static int g_read (lua_State *L, FILE *f, int first) { + int nargs = lua_gettop(L) - 1; + int success; + int n; + clearerr(f); + if (nargs == 0) { /* no arguments? 
*/ + success = read_line(L, f); + n = first+1; /* to return 1 result */ + } + else { /* ensure stack space for all results and for auxlib's buffer */ + luaL_checkstack(L, nargs+LUA_MINSTACK, "too many arguments"); + success = 1; + for (n = first; nargs-- && success; n++) { + if (lua_type(L, n) == LUA_TNUMBER) { + size_t l = (size_t)lua_tointeger(L, n); + success = (l == 0) ? test_eof(L, f) : read_chars(L, f, l); + } + else { + const char *p = lua_tostring(L, n); + luaL_argcheck(L, p && p[0] == '*', n, "invalid option"); + switch (p[1]) { + case 'n': /* number */ + success = read_number(L, f); + break; + case 'l': /* line */ + success = read_line(L, f); + break; + case 'a': /* file */ + read_chars(L, f, ~((size_t)0)); /* read MAX_SIZE_T chars */ + success = 1; /* always success */ + break; + default: + return luaL_argerror(L, n, "invalid format"); + } + } + } + } + if (ferror(f)) + return pushresult(L, 0, NULL); + if (!success) { + lua_pop(L, 1); /* remove last result */ + lua_pushnil(L); /* push nil instead */ + } + return n - first; +} + + +static int io_read (lua_State *L) { + return g_read(L, getiofile(L, IO_INPUT), 1); +} + + +static int f_read (lua_State *L) { + return g_read(L, tofile(L), 2); +} + + +static int io_readline (lua_State *L) { + FILE *f = *(FILE **)lua_touserdata(L, lua_upvalueindex(1)); + int sucess; + if (f == NULL) /* file is already closed? */ + luaL_error(L, "file is already closed"); + sucess = read_line(L, f); + if (ferror(f)) + return luaL_error(L, "%s", strerror(errno)); + if (sucess) return 1; + else { /* EOF */ + if (lua_toboolean(L, lua_upvalueindex(2))) { /* generator created file? */ + lua_settop(L, 0); + lua_pushvalue(L, lua_upvalueindex(1)); + aux_close(L); /* close it */ + } + return 0; + } +} + +/* }====================================================== */ + + +static int g_write (lua_State *L, FILE *f, int arg) { + int nargs = lua_gettop(L) - 1; + int status = 1; + for (; nargs--; arg++) { + if (lua_type(L, arg) == LUA_TNUMBER) { + /* optimization: could be done exactly as for strings */ + status = status && + fprintf(f, LUA_NUMBER_FMT, lua_tonumber(L, arg)) > 0; + } + else { + size_t l; + const char *s = luaL_checklstring(L, arg, &l); + status = status && (fwrite(s, sizeof(char), l, f) == l); + } + } + return pushresult(L, status, NULL); +} + + +static int io_write (lua_State *L) { + return g_write(L, getiofile(L, IO_OUTPUT), 1); +} + + +static int f_write (lua_State *L) { + return g_write(L, tofile(L), 2); +} + + +static int f_seek (lua_State *L) { + static const int mode[] = {SEEK_SET, SEEK_CUR, SEEK_END}; + static const char *const modenames[] = {"set", "cur", "end", NULL}; + FILE *f = tofile(L); + int op = luaL_checkoption(L, 2, "cur", modenames); + long offset = luaL_optlong(L, 3, 0); + op = fseek(f, offset, mode[op]); + if (op) + return pushresult(L, 0, NULL); /* error */ + else { + lua_pushinteger(L, ftell(f)); + return 1; + } +} + + +static int f_setvbuf (lua_State *L) { + static const int mode[] = {_IONBF, _IOFBF, _IOLBF}; + static const char *const modenames[] = {"no", "full", "line", NULL}; + FILE *f = tofile(L); + int op = luaL_checkoption(L, 2, NULL, modenames); + lua_Integer sz = luaL_optinteger(L, 3, LUAL_BUFFERSIZE); + int res = setvbuf(f, NULL, mode[op], sz); + return pushresult(L, res == 0, NULL); +} + + + +static int io_flush (lua_State *L) { + return pushresult(L, fflush(getiofile(L, IO_OUTPUT)) == 0, NULL); +} + + +static int f_flush (lua_State *L) { + return pushresult(L, fflush(tofile(L)) == 0, NULL); +} + + +static const 
luaL_Reg iolib[] = { + {"close", io_close}, + {"flush", io_flush}, + {"input", io_input}, + {"lines", io_lines}, + {"open", io_open}, + {"output", io_output}, + {"popen", io_popen}, + {"read", io_read}, + {"tmpfile", io_tmpfile}, + {"type", io_type}, + {"write", io_write}, + {NULL, NULL} +}; + + +static const luaL_Reg flib[] = { + {"close", io_close}, + {"flush", f_flush}, + {"lines", f_lines}, + {"read", f_read}, + {"seek", f_seek}, + {"setvbuf", f_setvbuf}, + {"write", f_write}, + {"__gc", io_gc}, + {"__tostring", io_tostring}, + {NULL, NULL} +}; + + +static void createmeta (lua_State *L) { + luaL_newmetatable(L, LUA_FILEHANDLE); /* create metatable for file handles */ + lua_pushvalue(L, -1); /* push metatable */ + lua_setfield(L, -2, "__index"); /* metatable.__index = metatable */ + luaL_register(L, NULL, flib); /* file methods */ +} + + +static void createstdfile (lua_State *L, FILE *f, int k, const char *fname) { + *newfile(L) = f; + if (k > 0) { + lua_pushvalue(L, -1); + lua_rawseti(L, LUA_ENVIRONINDEX, k); + } + lua_setfield(L, -2, fname); +} + + +LUALIB_API int luaopen_io (lua_State *L) { + createmeta(L); + /* create (private) environment (with fields IO_INPUT, IO_OUTPUT, __close) */ + lua_createtable(L, 2, 1); + lua_replace(L, LUA_ENVIRONINDEX); + /* open library */ + luaL_register(L, LUA_IOLIBNAME, iolib); + /* create (and set) default files */ + createstdfile(L, stdin, IO_INPUT, "stdin"); + createstdfile(L, stdout, IO_OUTPUT, "stdout"); + createstdfile(L, stderr, 0, "stderr"); + /* create environment for 'popen' */ + lua_getfield(L, -1, "popen"); + lua_createtable(L, 0, 1); + lua_pushcfunction(L, io_pclose); + lua_setfield(L, -2, "__close"); + lua_setfenv(L, -2); + lua_pop(L, 1); /* pop 'popen' */ + /* set default close function */ + lua_pushcfunction(L, io_fclose); + lua_setfield(L, LUA_ENVIRONINDEX, "__close"); + return 1; +} + diff --git a/deps/lua/src/llex.c b/deps/lua/src/llex.c new file mode 100644 index 0000000000000000000000000000000000000000..f3022df59681cd6077b8dba3de75625d085abb02 --- /dev/null +++ b/deps/lua/src/llex.c @@ -0,0 +1,460 @@ +/* +** $Id: llex.c,v 2.19 2006/02/06 18:28:16 roberto Exp $ +** Lexical Analyzer +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + +#define llex_c +#define LUA_CORE + +#include "lua.h" + +#include "ldo.h" +#include "llex.h" +#include "lobject.h" +#include "lparser.h" +#include "lstate.h" +#include "lstring.h" +#include "lzio.h" + + + +#define next(ls) (ls->current = zgetc(ls->z)) + + + + +#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r') + + +/* ORDER RESERVED */ +const char *const luaX_tokens [] = { + "and", "break", "do", "else", "elseif", + "end", "false", "for", "function", "if", + "in", "local", "nil", "not", "or", "repeat", + "return", "then", "true", "until", "while", + "..", "...", "==", ">=", "<=", "~=", + "", "", "", "", + NULL +}; + + +#define save_and_next(ls) (save(ls, ls->current), next(ls)) + + +static void save (LexState *ls, int c) { + Mbuffer *b = ls->buff; + if (b->n + 1 > b->buffsize) { + size_t newsize; + if (b->buffsize >= MAX_SIZET/2) + luaX_lexerror(ls, "lexical element too long", 0); + newsize = b->buffsize * 2; + luaZ_resizebuffer(ls->L, b, newsize); + } + b->buffer[b->n++] = cast(char, c); +} + + +void luaX_init (lua_State *L) { + int i; + for (i=0; itsv.reserved = cast_byte(i+1); /* reserved word */ + } +} + + +#define MAXSRC 80 + + +const char *luaX_token2str (LexState *ls, int token) { + if (token < FIRST_RESERVED) { + lua_assert(token == cast(unsigned char, 
token)); + return (iscntrl(token)) ? luaO_pushfstring(ls->L, "char(%d)", token) : + luaO_pushfstring(ls->L, "%c", token); + } + else + return luaX_tokens[token-FIRST_RESERVED]; +} + + +static const char *txtToken (LexState *ls, int token) { + switch (token) { + case TK_NAME: + case TK_STRING: + case TK_NUMBER: + save(ls, '\0'); + return luaZ_buffer(ls->buff); + default: + return luaX_token2str(ls, token); + } +} + + +void luaX_lexerror (LexState *ls, const char *msg, int token) { + char buff[MAXSRC]; + luaO_chunkid(buff, getstr(ls->source), MAXSRC); + msg = luaO_pushfstring(ls->L, "%s:%d: %s", buff, ls->linenumber, msg); + if (token) + luaO_pushfstring(ls->L, "%s near " LUA_QS, msg, txtToken(ls, token)); + luaD_throw(ls->L, LUA_ERRSYNTAX); +} + + +void luaX_syntaxerror (LexState *ls, const char *msg) { + luaX_lexerror(ls, msg, ls->t.token); +} + + +TString *luaX_newstring (LexState *ls, const char *str, size_t l) { + lua_State *L = ls->L; + TString *ts = luaS_newlstr(L, str, l); + TValue *o = luaH_setstr(L, ls->fs->h, ts); /* entry for `str' */ + if (ttisnil(o)) + setbvalue(o, 1); /* make sure `str' will not be collected */ + return ts; +} + + +static void inclinenumber (LexState *ls) { + int old = ls->current; + lua_assert(currIsNewline(ls)); + next(ls); /* skip `\n' or `\r' */ + if (currIsNewline(ls) && ls->current != old) + next(ls); /* skip `\n\r' or `\r\n' */ + if (++ls->linenumber >= MAX_INT) + luaX_syntaxerror(ls, "chunk has too many lines"); +} + + +void luaX_setinput (lua_State *L, LexState *ls, ZIO *z, TString *source) { + ls->decpoint = '.'; + ls->L = L; + ls->lookahead.token = TK_EOS; /* no look-ahead token */ + ls->z = z; + ls->fs = NULL; + ls->linenumber = 1; + ls->lastline = 1; + ls->source = source; + luaZ_resizebuffer(ls->L, ls->buff, LUA_MINBUFFER); /* initialize buffer */ + next(ls); /* read first char */ +} + + + +/* +** ======================================================= +** LEXICAL ANALYZER +** ======================================================= +*/ + + + +static int check_next (LexState *ls, const char *set) { + if (!strchr(set, ls->current)) + return 0; + save_and_next(ls); + return 1; +} + + +static void buffreplace (LexState *ls, char from, char to) { + size_t n = luaZ_bufflen(ls->buff); + char *p = luaZ_buffer(ls->buff); + while (n--) + if (p[n] == from) p[n] = to; +} + + +static void trydecpoint (LexState *ls, SemInfo *seminfo) { + /* format error: try to update decimal point separator */ + struct lconv *cv = localeconv(); + char old = ls->decpoint; + ls->decpoint = (cv ? cv->decimal_point[0] : '.'); + buffreplace(ls, old, ls->decpoint); /* try updated decimal separator */ + if (!luaO_str2d(luaZ_buffer(ls->buff), &seminfo->r)) { + /* format error with correct decimal point: no more options */ + buffreplace(ls, ls->decpoint, '.'); /* undo change (for error message) */ + luaX_lexerror(ls, "malformed number", TK_NUMBER); + } +} + + +/* LUA_NUMBER */ +static void read_numeral (LexState *ls, SemInfo *seminfo) { + lua_assert(isdigit(ls->current)); + do { + save_and_next(ls); + } while (isdigit(ls->current) || ls->current == '.'); + if (check_next(ls, "Ee")) /* `E'? */ + check_next(ls, "+-"); /* optional exponent sign */ + while (isalnum(ls->current) || ls->current == '_') + save_and_next(ls); + save(ls, '\0'); + buffreplace(ls, '.', ls->decpoint); /* follow locale for decimal point */ + if (!luaO_str2d(luaZ_buffer(ls->buff), &seminfo->r)) /* format error? 
*/ + trydecpoint(ls, seminfo); /* try to update decimal point separator */ +} + + +static int skip_sep (LexState *ls) { + int count = 0; + int s = ls->current; + lua_assert(s == '[' || s == ']'); + save_and_next(ls); + while (ls->current == '=') { + save_and_next(ls); + count++; + } + return (ls->current == s) ? count : (-count) - 1; +} + + +static void read_long_string (LexState *ls, SemInfo *seminfo, int sep) { + int cont = 0; + (void)(cont); /* avoid warnings when `cont' is not used */ + save_and_next(ls); /* skip 2nd `[' */ + if (currIsNewline(ls)) /* string starts with a newline? */ + inclinenumber(ls); /* skip it */ + for (;;) { + switch (ls->current) { + case EOZ: + luaX_lexerror(ls, (seminfo) ? "unfinished long string" : + "unfinished long comment", TK_EOS); + break; /* to avoid warnings */ +#if defined(LUA_COMPAT_LSTR) + case '[': { + if (skip_sep(ls) == sep) { + save_and_next(ls); /* skip 2nd `[' */ + cont++; +#if LUA_COMPAT_LSTR == 1 + if (sep == 0) + luaX_lexerror(ls, "nesting of [[...]] is deprecated", '['); +#endif + } + break; + } +#endif + case ']': { + if (skip_sep(ls) == sep) { + save_and_next(ls); /* skip 2nd `]' */ +#if defined(LUA_COMPAT_LSTR) && LUA_COMPAT_LSTR == 2 + cont--; + if (sep == 0 && cont >= 0) break; +#endif + goto endloop; + } + break; + } + case '\n': + case '\r': { + save(ls, '\n'); + inclinenumber(ls); + if (!seminfo) luaZ_resetbuffer(ls->buff); /* avoid wasting space */ + break; + } + default: { + if (seminfo) save_and_next(ls); + else next(ls); + } + } + } endloop: + if (seminfo) + seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + (2 + sep), + luaZ_bufflen(ls->buff) - 2*(2 + sep)); +} + + +static void read_string (LexState *ls, int del, SemInfo *seminfo) { + save_and_next(ls); + while (ls->current != del) { + switch (ls->current) { + case EOZ: + luaX_lexerror(ls, "unfinished string", TK_EOS); + continue; /* to avoid warnings */ + case '\n': + case '\r': + luaX_lexerror(ls, "unfinished string", TK_STRING); + continue; /* to avoid warnings */ + case '\\': { + int c; + next(ls); /* do not save the `\' */ + switch (ls->current) { + case 'a': c = '\a'; break; + case 'b': c = '\b'; break; + case 'f': c = '\f'; break; + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'v': c = '\v'; break; + case '\n': /* go through */ + case '\r': save(ls, '\n'); inclinenumber(ls); continue; + case EOZ: continue; /* will raise an error next loop */ + default: { + if (!isdigit(ls->current)) + save_and_next(ls); /* handles \\, \", \', and \? 
*/ + else { /* \xxx */ + int i = 0; + c = 0; + do { + c = 10*c + (ls->current-'0'); + next(ls); + } while (++i<3 && isdigit(ls->current)); + if (c > UCHAR_MAX) + luaX_lexerror(ls, "escape sequence too large", TK_STRING); + save(ls, c); + } + continue; + } + } + save(ls, c); + next(ls); + continue; + } + default: + save_and_next(ls); + } + } + save_and_next(ls); /* skip delimiter */ + seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + 1, + luaZ_bufflen(ls->buff) - 2); +} + + +static int llex (LexState *ls, SemInfo *seminfo) { + luaZ_resetbuffer(ls->buff); + for (;;) { + switch (ls->current) { + case '\n': + case '\r': { + inclinenumber(ls); + continue; + } + case '-': { + next(ls); + if (ls->current != '-') return '-'; + /* else is a comment */ + next(ls); + if (ls->current == '[') { + int sep = skip_sep(ls); + luaZ_resetbuffer(ls->buff); /* `skip_sep' may dirty the buffer */ + if (sep >= 0) { + read_long_string(ls, NULL, sep); /* long comment */ + luaZ_resetbuffer(ls->buff); + continue; + } + } + /* else short comment */ + while (!currIsNewline(ls) && ls->current != EOZ) + next(ls); + continue; + } + case '[': { + int sep = skip_sep(ls); + if (sep >= 0) { + read_long_string(ls, seminfo, sep); + return TK_STRING; + } + else if (sep == -1) return '['; + else luaX_lexerror(ls, "invalid long string delimiter", TK_STRING); + } + case '=': { + next(ls); + if (ls->current != '=') return '='; + else { next(ls); return TK_EQ; } + } + case '<': { + next(ls); + if (ls->current != '=') return '<'; + else { next(ls); return TK_LE; } + } + case '>': { + next(ls); + if (ls->current != '=') return '>'; + else { next(ls); return TK_GE; } + } + case '~': { + next(ls); + if (ls->current != '=') return '~'; + else { next(ls); return TK_NE; } + } + case '"': + case '\'': { + read_string(ls, ls->current, seminfo); + return TK_STRING; + } + case '.': { + save_and_next(ls); + if (check_next(ls, ".")) { + if (check_next(ls, ".")) + return TK_DOTS; /* ... */ + else return TK_CONCAT; /* .. */ + } + else if (!isdigit(ls->current)) return '.'; + else { + read_numeral(ls, seminfo); + return TK_NUMBER; + } + } + case EOZ: { + return TK_EOS; + } + default: { + if (isspace(ls->current)) { + lua_assert(!currIsNewline(ls)); + next(ls); + continue; + } + else if (isdigit(ls->current)) { + read_numeral(ls, seminfo); + return TK_NUMBER; + } + else if (isalpha(ls->current) || ls->current == '_') { + /* identifier or reserved word */ + TString *ts; + do { + save_and_next(ls); + } while (isalnum(ls->current) || ls->current == '_'); + ts = luaX_newstring(ls, luaZ_buffer(ls->buff), + luaZ_bufflen(ls->buff)); + if (ts->tsv.reserved > 0) /* reserved word? */ + return ts->tsv.reserved - 1 + FIRST_RESERVED; + else { + seminfo->ts = ts; + return TK_NAME; + } + } + else { + int c = ls->current; + next(ls); + return c; /* single-char tokens (+ - / ...) */ + } + } + } + } +} + + +void luaX_next (LexState *ls) { + ls->lastline = ls->linenumber; + if (ls->lookahead.token != TK_EOS) { /* is there a look-ahead token? 
*/ + ls->t = ls->lookahead; /* use this one */ + ls->lookahead.token = TK_EOS; /* and discharge it */ + } + else + ls->t.token = llex(ls, &ls->t.seminfo); /* read next token */ +} + + +void luaX_lookahead (LexState *ls) { + lua_assert(ls->lookahead.token == TK_EOS); + ls->lookahead.token = llex(ls, &ls->lookahead.seminfo); +} + diff --git a/deps/lua/src/llex.h b/deps/lua/src/llex.h new file mode 100644 index 0000000000000000000000000000000000000000..d4ca7f20e1f243cfec83ed722242dc64d6f4a3cd --- /dev/null +++ b/deps/lua/src/llex.h @@ -0,0 +1,81 @@ +/* +** $Id: llex.h,v 1.57 2005/12/07 15:43:05 roberto Exp $ +** Lexical Analyzer +** See Copyright Notice in lua.h +*/ + +#ifndef llex_h +#define llex_h + +#include "lobject.h" +#include "lzio.h" + + +#define FIRST_RESERVED 257 + +/* maximum length of a reserved word */ +#define TOKEN_LEN (sizeof("function")/sizeof(char)) + + +/* +* WARNING: if you change the order of this enumeration, +* grep "ORDER RESERVED" +*/ +enum RESERVED { + /* terminal symbols denoted by reserved words */ + TK_AND = FIRST_RESERVED, TK_BREAK, + TK_DO, TK_ELSE, TK_ELSEIF, TK_END, TK_FALSE, TK_FOR, TK_FUNCTION, + TK_IF, TK_IN, TK_LOCAL, TK_NIL, TK_NOT, TK_OR, TK_REPEAT, + TK_RETURN, TK_THEN, TK_TRUE, TK_UNTIL, TK_WHILE, + /* other terminal symbols */ + TK_CONCAT, TK_DOTS, TK_EQ, TK_GE, TK_LE, TK_NE, TK_NUMBER, + TK_NAME, TK_STRING, TK_EOS +}; + +/* number of reserved words */ +#define NUM_RESERVED (cast(int, TK_WHILE-FIRST_RESERVED+1)) + + +/* array with token `names' */ +LUAI_DATA const char *const luaX_tokens []; + + +typedef union { + lua_Number r; + TString *ts; +} SemInfo; /* semantics information */ + + +typedef struct Token { + int token; + SemInfo seminfo; +} Token; + + +typedef struct LexState { + int current; /* current character (charint) */ + int linenumber; /* input line counter */ + int lastline; /* line of last token `consumed' */ + Token t; /* current token */ + Token lookahead; /* look ahead token */ + struct FuncState *fs; /* `FuncState' is private to the parser */ + struct lua_State *L; + ZIO *z; /* input stream */ + Mbuffer *buff; /* buffer for tokens */ + TString *source; /* current source name */ + char decpoint; /* locale decimal point */ +} LexState; + + +LUAI_FUNC void luaX_init (lua_State *L); +LUAI_FUNC void luaX_setinput (lua_State *L, LexState *LS, ZIO *z, + TString *source); +LUAI_FUNC TString *luaX_newstring (LexState *LS, const char *str, size_t l); +LUAI_FUNC void luaX_next (LexState *ls); +LUAI_FUNC void luaX_lookahead (LexState *ls); +LUAI_FUNC void luaX_lexerror (LexState *ls, const char *msg, int token); +LUAI_FUNC void luaX_syntaxerror (LexState *ls, const char *s); +LUAI_FUNC const char *luaX_token2str (LexState *ls, int token); + + +#endif diff --git a/deps/lua/src/llimits.h b/deps/lua/src/llimits.h new file mode 100644 index 0000000000000000000000000000000000000000..b03221aeec2ac6144225025d1eecb91ef2ad7f8e --- /dev/null +++ b/deps/lua/src/llimits.h @@ -0,0 +1,128 @@ +/* +** $Id: llimits.h,v 1.69 2005/12/27 17:12:00 roberto Exp $ +** Limits, basic types, and some other `installation-dependent' definitions +** See Copyright Notice in lua.h +*/ + +#ifndef llimits_h +#define llimits_h + + +#include +#include + + +#include "lua.h" + + +typedef LUAI_UINT32 lu_int32; + +typedef LUAI_UMEM lu_mem; + +typedef LUAI_MEM l_mem; + + + +/* chars used as small naturals (so that `char' is reserved for characters) */ +typedef unsigned char lu_byte; + + +#define MAX_SIZET ((size_t)(~(size_t)0)-2) + +#define MAX_LUMEM ((lu_mem)(~(lu_mem)0)-2) + + 
+#define MAX_INT (INT_MAX-2) /* maximum value of an int (-2 for safety) */ + +/* +** conversion of pointer to integer +** this is for hashing only; there is no problem if the integer +** cannot hold the whole pointer value +*/ +#define IntPoint(p) ((unsigned int)(lu_mem)(p)) + + + +/* type to ensure maximum alignment */ +typedef LUAI_USER_ALIGNMENT_T L_Umaxalign; + + +/* result of a `usual argument conversion' over lua_Number */ +typedef LUAI_UACNUMBER l_uacNumber; + + +/* internal assertions for in-house debugging */ +#ifdef lua_assert + +#define check_exp(c,e) (lua_assert(c), (e)) +#define api_check(l,e) lua_assert(e) + +#else + +#define lua_assert(c) ((void)0) +#define check_exp(c,e) (e) +#define api_check luai_apicheck + +#endif + + +#ifndef UNUSED +#define UNUSED(x) ((void)(x)) /* to avoid warnings */ +#endif + + +#ifndef cast +#define cast(t, exp) ((t)(exp)) +#endif + +#define cast_byte(i) cast(lu_byte, (i)) +#define cast_num(i) cast(lua_Number, (i)) +#define cast_int(i) cast(int, (i)) + + + +/* +** type for virtual-machine instructions +** must be an unsigned with (at least) 4 bytes (see details in lopcodes.h) +*/ +typedef lu_int32 Instruction; + + + +/* maximum stack for a Lua function */ +#define MAXSTACK 250 + + + +/* minimum size for the string table (must be power of 2) */ +#ifndef MINSTRTABSIZE +#define MINSTRTABSIZE 32 +#endif + + +/* minimum size for string buffer */ +#ifndef LUA_MINBUFFER +#define LUA_MINBUFFER 32 +#endif + + +#ifndef lua_lock +#define lua_lock(L) ((void) 0) +#define lua_unlock(L) ((void) 0) +#endif + +#ifndef luai_threadyield +#define luai_threadyield(L) {lua_unlock(L); lua_lock(L);} +#endif + + +/* +** macro to control inclusion of some hard tests on stack reallocation +*/ +#ifndef HARDSTACKTESTS +#define condhardstacktests(x) ((void)0) +#else +#define condhardstacktests(x) x +#endif + +#endif diff --git a/deps/lua/src/lmathlib.c b/deps/lua/src/lmathlib.c new file mode 100644 index 0000000000000000000000000000000000000000..d181a7319438f2adef703c3b5c0779134c78a7eb --- /dev/null +++ b/deps/lua/src/lmathlib.c @@ -0,0 +1,263 @@ +/* +** $Id: lmathlib.c,v 1.67 2005/08/26 17:36:32 roberto Exp $ +** Standard mathematical library +** See Copyright Notice in lua.h +*/ + + +#include +#include + +#define lmathlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +#undef PI +#define PI (3.14159265358979323846) +#define RADIANS_PER_DEGREE (PI/180.0) + + + +static int math_abs (lua_State *L) { + lua_pushnumber(L, fabs(luaL_checknumber(L, 1))); + return 1; +} + +static int math_sin (lua_State *L) { + lua_pushnumber(L, sin(luaL_checknumber(L, 1))); + return 1; +} + +static int math_sinh (lua_State *L) { + lua_pushnumber(L, sinh(luaL_checknumber(L, 1))); + return 1; +} + +static int math_cos (lua_State *L) { + lua_pushnumber(L, cos(luaL_checknumber(L, 1))); + return 1; +} + +static int math_cosh (lua_State *L) { + lua_pushnumber(L, cosh(luaL_checknumber(L, 1))); + return 1; +} + +static int math_tan (lua_State *L) { + lua_pushnumber(L, tan(luaL_checknumber(L, 1))); + return 1; +} + +static int math_tanh (lua_State *L) { + lua_pushnumber(L, tanh(luaL_checknumber(L, 1))); + return 1; +} + +static int math_asin (lua_State *L) { + lua_pushnumber(L, asin(luaL_checknumber(L, 1))); + return 1; +} + +static int math_acos (lua_State *L) { + lua_pushnumber(L, acos(luaL_checknumber(L, 1))); + return 1; +} + +static int math_atan (lua_State *L) { + lua_pushnumber(L, atan(luaL_checknumber(L, 1))); + return 1; +} + +static int math_atan2 (lua_State 
*L) { + lua_pushnumber(L, atan2(luaL_checknumber(L, 1), luaL_checknumber(L, 2))); + return 1; +} + +static int math_ceil (lua_State *L) { + lua_pushnumber(L, ceil(luaL_checknumber(L, 1))); + return 1; +} + +static int math_floor (lua_State *L) { + lua_pushnumber(L, floor(luaL_checknumber(L, 1))); + return 1; +} + +static int math_fmod (lua_State *L) { + lua_pushnumber(L, fmod(luaL_checknumber(L, 1), luaL_checknumber(L, 2))); + return 1; +} + +static int math_modf (lua_State *L) { + double ip; + double fp = modf(luaL_checknumber(L, 1), &ip); + lua_pushnumber(L, ip); + lua_pushnumber(L, fp); + return 2; +} + +static int math_sqrt (lua_State *L) { + lua_pushnumber(L, sqrt(luaL_checknumber(L, 1))); + return 1; +} + +static int math_pow (lua_State *L) { + lua_pushnumber(L, pow(luaL_checknumber(L, 1), luaL_checknumber(L, 2))); + return 1; +} + +static int math_log (lua_State *L) { + lua_pushnumber(L, log(luaL_checknumber(L, 1))); + return 1; +} + +static int math_log10 (lua_State *L) { + lua_pushnumber(L, log10(luaL_checknumber(L, 1))); + return 1; +} + +static int math_exp (lua_State *L) { + lua_pushnumber(L, exp(luaL_checknumber(L, 1))); + return 1; +} + +static int math_deg (lua_State *L) { + lua_pushnumber(L, luaL_checknumber(L, 1)/RADIANS_PER_DEGREE); + return 1; +} + +static int math_rad (lua_State *L) { + lua_pushnumber(L, luaL_checknumber(L, 1)*RADIANS_PER_DEGREE); + return 1; +} + +static int math_frexp (lua_State *L) { + int e; + lua_pushnumber(L, frexp(luaL_checknumber(L, 1), &e)); + lua_pushinteger(L, e); + return 2; +} + +static int math_ldexp (lua_State *L) { + lua_pushnumber(L, ldexp(luaL_checknumber(L, 1), luaL_checkint(L, 2))); + return 1; +} + + + +static int math_min (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + lua_Number dmin = luaL_checknumber(L, 1); + int i; + for (i=2; i<=n; i++) { + lua_Number d = luaL_checknumber(L, i); + if (d < dmin) + dmin = d; + } + lua_pushnumber(L, dmin); + return 1; +} + + +static int math_max (lua_State *L) { + int n = lua_gettop(L); /* number of arguments */ + lua_Number dmax = luaL_checknumber(L, 1); + int i; + for (i=2; i<=n; i++) { + lua_Number d = luaL_checknumber(L, i); + if (d > dmax) + dmax = d; + } + lua_pushnumber(L, dmax); + return 1; +} + + +static int math_random (lua_State *L) { + /* the `%' avoids the (rare) case of r==1, and is needed also because on + some systems (SunOS!) 
`rand()' may return a value larger than RAND_MAX */ + lua_Number r = (lua_Number)(rand()%RAND_MAX) / (lua_Number)RAND_MAX; + switch (lua_gettop(L)) { /* check number of arguments */ + case 0: { /* no arguments */ + lua_pushnumber(L, r); /* Number between 0 and 1 */ + break; + } + case 1: { /* only upper limit */ + int u = luaL_checkint(L, 1); + luaL_argcheck(L, 1<=u, 1, "interval is empty"); + lua_pushnumber(L, floor(r*u)+1); /* int between 1 and `u' */ + break; + } + case 2: { /* lower and upper limits */ + int l = luaL_checkint(L, 1); + int u = luaL_checkint(L, 2); + luaL_argcheck(L, l<=u, 2, "interval is empty"); + lua_pushnumber(L, floor(r*(u-l+1))+l); /* int between `l' and `u' */ + break; + } + default: return luaL_error(L, "wrong number of arguments"); + } + return 1; +} + + +static int math_randomseed (lua_State *L) { + srand(luaL_checkint(L, 1)); + return 0; +} + + +static const luaL_Reg mathlib[] = { + {"abs", math_abs}, + {"acos", math_acos}, + {"asin", math_asin}, + {"atan2", math_atan2}, + {"atan", math_atan}, + {"ceil", math_ceil}, + {"cosh", math_cosh}, + {"cos", math_cos}, + {"deg", math_deg}, + {"exp", math_exp}, + {"floor", math_floor}, + {"fmod", math_fmod}, + {"frexp", math_frexp}, + {"ldexp", math_ldexp}, + {"log10", math_log10}, + {"log", math_log}, + {"max", math_max}, + {"min", math_min}, + {"modf", math_modf}, + {"pow", math_pow}, + {"rad", math_rad}, + {"random", math_random}, + {"randomseed", math_randomseed}, + {"sinh", math_sinh}, + {"sin", math_sin}, + {"sqrt", math_sqrt}, + {"tanh", math_tanh}, + {"tan", math_tan}, + {NULL, NULL} +}; + + +/* +** Open math library +*/ +LUALIB_API int luaopen_math (lua_State *L) { + luaL_register(L, LUA_MATHLIBNAME, mathlib); + lua_pushnumber(L, PI); + lua_setfield(L, -2, "pi"); + lua_pushnumber(L, HUGE_VAL); + lua_setfield(L, -2, "huge"); +#if defined(LUA_COMPAT_MOD) + lua_getfield(L, -1, "fmod"); + lua_setfield(L, -2, "mod"); +#endif + return 1; +} + diff --git a/deps/lua/src/lmem.c b/deps/lua/src/lmem.c new file mode 100644 index 0000000000000000000000000000000000000000..cef2bc5f8513fa99623cb00699586e9a18e187b7 --- /dev/null +++ b/deps/lua/src/lmem.c @@ -0,0 +1,86 @@ +/* +** $Id: lmem.c,v 1.70 2005/12/26 13:35:47 roberto Exp $ +** Interface to Memory Manager +** See Copyright Notice in lua.h +*/ + + +#include + +#define lmem_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" + + + +/* +** About the realloc function: +** void * frealloc (void *ud, void *ptr, size_t osize, size_t nsize); +** (`osize' is the old size, `nsize' is the new size) +** +** Lua ensures that (ptr == NULL) iff (osize == 0). +** +** * frealloc(ud, NULL, 0, x) creates a new block of size `x' +** +** * frealloc(ud, p, x, 0) frees the block `p' +** (in this specific case, frealloc must return NULL). +** particularly, frealloc(ud, NULL, 0, 0) does nothing +** (which is equivalent to free(NULL) in ANSI C) +** +** frealloc returns NULL if it cannot create or reallocate the area +** (any reallocation to an equal or smaller size cannot fail!) +*/ + + + +#define MINSIZEARRAY 4 + + +void *luaM_growaux_ (lua_State *L, void *block, int *size, size_t size_elems, + int limit, const char *errormsg) { + void *newblock; + int newsize; + if (*size >= limit/2) { /* cannot double it? */ + if (*size >= limit) /* cannot grow even a little? 
*/ + luaG_runerror(L, errormsg); + newsize = limit; /* still have at least one free place */ + } + else { + newsize = (*size)*2; + if (newsize < MINSIZEARRAY) + newsize = MINSIZEARRAY; /* minimum size */ + } + newblock = luaM_reallocv(L, block, *size, newsize, size_elems); + *size = newsize; /* update only when everything else is OK */ + return newblock; +} + + +void *luaM_toobig (lua_State *L) { + luaG_runerror(L, "memory allocation error: block too big"); + return NULL; /* to avoid warnings */ +} + + + +/* +** generic allocation routine. +*/ +void *luaM_realloc_ (lua_State *L, void *block, size_t osize, size_t nsize) { + global_State *g = G(L); + lua_assert((osize == 0) == (block == NULL)); + block = (*g->frealloc)(g->ud, block, osize, nsize); + if (block == NULL && nsize > 0) + luaD_throw(L, LUA_ERRMEM); + lua_assert((nsize == 0) == (block == NULL)); + g->totalbytes = (g->totalbytes - osize) + nsize; + return block; +} + diff --git a/deps/lua/src/lmem.h b/deps/lua/src/lmem.h new file mode 100644 index 0000000000000000000000000000000000000000..19df1fbb6c90f182bc27286d7c482f44605baf78 --- /dev/null +++ b/deps/lua/src/lmem.h @@ -0,0 +1,49 @@ +/* +** $Id: lmem.h,v 1.31 2005/04/25 19:24:10 roberto Exp $ +** Interface to Memory Manager +** See Copyright Notice in lua.h +*/ + +#ifndef lmem_h +#define lmem_h + + +#include + +#include "llimits.h" +#include "lua.h" + +#define MEMERRMSG "not enough memory" + + +#define luaM_reallocv(L,b,on,n,e) \ + ((cast(size_t, (n)+1) <= MAX_SIZET/(e)) ? /* +1 to avoid warnings */ \ + luaM_realloc_(L, (b), (on)*(e), (n)*(e)) : \ + luaM_toobig(L)) + +#define luaM_freemem(L, b, s) luaM_realloc_(L, (b), (s), 0) +#define luaM_free(L, b) luaM_realloc_(L, (b), sizeof(*(b)), 0) +#define luaM_freearray(L, b, n, t) luaM_reallocv(L, (b), n, 0, sizeof(t)) + +#define luaM_malloc(L,t) luaM_realloc_(L, NULL, 0, (t)) +#define luaM_new(L,t) cast(t *, luaM_malloc(L, sizeof(t))) +#define luaM_newvector(L,n,t) \ + cast(t *, luaM_reallocv(L, NULL, 0, n, sizeof(t))) + +#define luaM_growvector(L,v,nelems,size,t,limit,e) \ + if ((nelems)+1 > (size)) \ + ((v)=cast(t *, luaM_growaux_(L,v,&(size),sizeof(t),limit,e))) + +#define luaM_reallocvector(L, v,oldn,n,t) \ + ((v)=cast(t *, luaM_reallocv(L, v, oldn, n, sizeof(t)))) + + +LUAI_FUNC void *luaM_realloc_ (lua_State *L, void *block, size_t oldsize, + size_t size); +LUAI_FUNC void *luaM_toobig (lua_State *L); +LUAI_FUNC void *luaM_growaux_ (lua_State *L, void *block, int *size, + size_t size_elem, int limit, + const char *errormsg); + +#endif + diff --git a/deps/lua/src/loadlib.c b/deps/lua/src/loadlib.c new file mode 100644 index 0000000000000000000000000000000000000000..19edaca0fd25a2f418152d58358b2eccb850910a --- /dev/null +++ b/deps/lua/src/loadlib.c @@ -0,0 +1,667 @@ +/* +** $Id: loadlib.c,v 1.51 2005/12/29 15:32:11 roberto Exp $ +** Dynamic library loader for Lua +** See Copyright Notice in lua.h +** +** This module contains an implementation of loadlib for Unix systems +** that have dlfcn, an implementation for Darwin (Mac OS X), an +** implementation for Windows, and a stub for other systems. 
+*/ + + +#include +#include + + +#define loadlib_c +#define LUA_LIB + +#include "lauxlib.h" +#include "lobject.h" +#include "lua.h" +#include "lualib.h" + + +/* environment variables that hold the search path for packages */ +#define LUA_PATH "LUA_PATH" +#define LUA_CPATH "LUA_CPATH" + +/* prefix for open functions in C libraries */ +#define LUA_POF "luaopen_" + +/* separator for open functions in C libraries */ +#define LUA_OFSEP "_" + + +#define LIBPREFIX "LOADLIB: " + +#define POF LUA_POF +#define LIB_FAIL "open" + + +/* error codes for ll_loadfunc */ +#define ERRLIB 1 +#define ERRFUNC 2 + +#define setprogdir(L) ((void)0) + + +static void ll_unloadlib (void *lib); +static void *ll_load (lua_State *L, const char *path); +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym); + + + +#if defined(LUA_DL_DLOPEN) +/* +** {======================================================================== +** This is an implementation of loadlib based on the dlfcn interface. +** The dlfcn interface is available in Linux, SunOS, Solaris, IRIX, FreeBSD, +** NetBSD, AIX 4.2, HPUX 11, and probably most other Unix flavors, at least +** as an emulation layer on top of native functions. +** ========================================================================= +*/ + +#include + +static void ll_unloadlib (void *lib) { + dlclose(lib); +} + + +static void *ll_load (lua_State *L, const char *path) { + void *lib = dlopen(path, RTLD_NOW); + if (lib == NULL) lua_pushstring(L, dlerror()); + return lib; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + lua_CFunction f = (lua_CFunction)dlsym(lib, sym); + if (f == NULL) lua_pushstring(L, dlerror()); + return f; +} + +/* }====================================================== */ + + + +#elif defined(LUA_DL_DLL) +/* +** {====================================================================== +** This is an implementation of loadlib for Windows using native functions. 
+** ======================================================================= +*/ + +#include + + +#undef setprogdir + +static void setprogdir (lua_State *L) { + char buff[MAX_PATH + 1]; + char *lb; + DWORD nsize = sizeof(buff)/sizeof(char); + DWORD n = GetModuleFileName(NULL, buff, nsize); + if (n == 0 || n == nsize || (lb = strrchr(buff, '\\')) == NULL) + luaL_error(L, "unable to get ModuleFileName"); + else { + *lb = '\0'; + luaL_gsub(L, lua_tostring(L, -1), LUA_EXECDIR, buff); + lua_remove(L, -2); /* remove original string */ + } +} + + +static void pusherror (lua_State *L) { + int error = GetLastError(); + char buffer[128]; + if (FormatMessage(FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM, + NULL, error, 0, buffer, sizeof(buffer), NULL)) + lua_pushstring(L, buffer); + else + lua_pushfstring(L, "system error %d\n", error); +} + +static void ll_unloadlib (void *lib) { + FreeLibrary((HINSTANCE)lib); +} + + +static void *ll_load (lua_State *L, const char *path) { + HINSTANCE lib = LoadLibrary(path); + if (lib == NULL) pusherror(L); + return lib; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + lua_CFunction f = (lua_CFunction)GetProcAddress((HINSTANCE)lib, sym); + if (f == NULL) pusherror(L); + return f; +} + +/* }====================================================== */ + + + +#elif defined(LUA_DL_DYLD) +/* +** {====================================================================== +** Native Mac OS X / Darwin Implementation +** ======================================================================= +*/ + +#include + + +/* Mac appends a `_' before C function names */ +#undef POF +#define POF "_" LUA_POF + + +static void pusherror (lua_State *L) { + const char *err_str; + const char *err_file; + NSLinkEditErrors err; + int err_num; + NSLinkEditError(&err, &err_num, &err_file, &err_str); + lua_pushstring(L, err_str); +} + + +static const char *errorfromcode (NSObjectFileImageReturnCode ret) { + switch (ret) { + case NSObjectFileImageInappropriateFile: + return "file is not a bundle"; + case NSObjectFileImageArch: + return "library is for wrong CPU type"; + case NSObjectFileImageFormat: + return "bad format"; + case NSObjectFileImageAccess: + return "cannot access file"; + case NSObjectFileImageFailure: + default: + return "unable to load library"; + } +} + + +static void ll_unloadlib (void *lib) { + NSUnLinkModule((NSModule)lib, NSUNLINKMODULE_OPTION_RESET_LAZY_REFERENCES); +} + + +static void *ll_load (lua_State *L, const char *path) { + NSObjectFileImage img; + NSObjectFileImageReturnCode ret; + /* this would be a rare case, but prevents crashing if it happens */ + if(!_dyld_present()) { + lua_pushliteral(L, "dyld not present"); + return NULL; + } + ret = NSCreateObjectFileImageFromFile(path, &img); + if (ret == NSObjectFileImageSuccess) { + NSModule mod = NSLinkModule(img, path, NSLINKMODULE_OPTION_PRIVATE | + NSLINKMODULE_OPTION_RETURN_ON_ERROR); + NSDestroyObjectFileImage(img); + if (mod == NULL) pusherror(L); + return mod; + } + lua_pushstring(L, errorfromcode(ret)); + return NULL; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + NSSymbol nss = NSLookupSymbolInModule((NSModule)lib, sym); + if (nss == NULL) { + lua_pushfstring(L, "symbol " LUA_QS " not found", sym); + return NULL; + } + return (lua_CFunction)NSAddressOfSymbol(nss); +} + +/* }====================================================== */ + + + +#else +/* +** {====================================================== +** Fallback for other systems 
+** ======================================================= +*/ + +#undef LIB_FAIL +#define LIB_FAIL "absent" + + +#define DLMSG "dynamic libraries not enabled; check your Lua installation" + + +static void ll_unloadlib (void *lib) { + (void)lib; /* to avoid warnings */ +} + + +static void *ll_load (lua_State *L, const char *path) { + (void)path; /* to avoid warnings */ + lua_pushliteral(L, DLMSG); + return NULL; +} + + +static lua_CFunction ll_sym (lua_State *L, void *lib, const char *sym) { + (void)lib; (void)sym; /* to avoid warnings */ + lua_pushliteral(L, DLMSG); + return NULL; +} + +/* }====================================================== */ +#endif + + + +static void **ll_register (lua_State *L, const char *path) { + void **plib; + lua_pushfstring(L, "%s%s", LIBPREFIX, path); + lua_gettable(L, LUA_REGISTRYINDEX); /* check library in registry? */ + if (!lua_isnil(L, -1)) /* is there an entry? */ + plib = (void **)lua_touserdata(L, -1); + else { /* no entry yet; create one */ + lua_pop(L, 1); + plib = (void **)lua_newuserdata(L, sizeof(const void *)); + *plib = NULL; + luaL_getmetatable(L, "_LOADLIB"); + lua_setmetatable(L, -2); + lua_pushfstring(L, "%s%s", LIBPREFIX, path); + lua_pushvalue(L, -2); + lua_settable(L, LUA_REGISTRYINDEX); + } + return plib; +} + + +/* +** __gc tag method: calls library's `ll_unloadlib' function with the lib +** handle +*/ +static int gctm (lua_State *L) { + void **lib = (void **)luaL_checkudata(L, 1, "_LOADLIB"); + if (*lib) ll_unloadlib(*lib); + *lib = NULL; /* mark library as closed */ + return 0; +} + + +static int ll_loadfunc (lua_State *L, const char *path, const char *sym) { + void **reg = ll_register(L, path); + if (*reg == NULL) *reg = ll_load(L, path); + if (*reg == NULL) + return ERRLIB; /* unable to load library */ + else { + lua_CFunction f = ll_sym(L, *reg, sym); + if (f == NULL) + return ERRFUNC; /* unable to find function */ + lua_pushcfunction(L, f); + return 0; /* return function */ + } +} + + +static int ll_loadlib (lua_State *L) { + const char *path = luaL_checkstring(L, 1); + const char *init = luaL_checkstring(L, 2); + int stat = ll_loadfunc(L, path, init); + if (stat == 0) /* no errors? */ + return 1; /* return the loaded function */ + else { /* error; error message is on stack top */ + lua_pushnil(L); + lua_insert(L, -2); + lua_pushstring(L, (stat == ERRLIB) ? 
LIB_FAIL : "init"); + return 3; /* return nil, error message, and where */ + } +} + + + +/* +** {====================================================== +** 'require' function +** ======================================================= +*/ + + +static int readable (const char *filename) { + FILE *f = fopen(filename, "r"); /* try to open file */ + if (f == NULL) return 0; /* open failed */ + fclose(f); + return 1; +} + + +static const char *pushnexttemplate (lua_State *L, const char *path) { + const char *l; + while (*path == *LUA_PATHSEP) path++; /* skip separators */ + if (*path == '\0') return NULL; /* no more templates */ + l = strchr(path, *LUA_PATHSEP); /* find next separator */ + if (l == NULL) l = path + strlen(path); + lua_pushlstring(L, path, l - path); /* template */ + return l; +} + + +static const char *findfile (lua_State *L, const char *name, + const char *pname) { + const char *path; + name = luaL_gsub(L, name, ".", LUA_DIRSEP); + lua_getfield(L, LUA_ENVIRONINDEX, pname); + path = lua_tostring(L, -1); + if (path == NULL) + luaL_error(L, LUA_QL("package.%s") " must be a string", pname); + lua_pushstring(L, ""); /* error accumulator */ + while ((path = pushnexttemplate(L, path)) != NULL) { + const char *filename; + filename = luaL_gsub(L, lua_tostring(L, -1), LUA_PATH_MARK, name); + if (readable(filename)) /* does file exist and is readable? */ + return filename; /* return that file name */ + lua_pop(L, 2); /* remove path template and file name */ + luaO_pushfstring(L, "\n\tno file " LUA_QS, filename); + lua_concat(L, 2); + } + return NULL; /* not found */ +} + + +static void loaderror (lua_State *L, const char *filename) { + luaL_error(L, "error loading module " LUA_QS " from file " LUA_QS ":\n\t%s", + lua_tostring(L, 1), filename, lua_tostring(L, -1)); +} + + +static int loader_Lua (lua_State *L) { + const char *filename; + const char *name = luaL_checkstring(L, 1); + filename = findfile(L, name, "path"); + if (filename == NULL) return 1; /* library not found in this path */ + if (luaL_loadfile(L, filename) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + + +static const char *mkfuncname (lua_State *L, const char *modname) { + const char *funcname; + const char *mark = strchr(modname, *LUA_IGMARK); + if (mark) modname = mark + 1; + funcname = luaL_gsub(L, modname, ".", LUA_OFSEP); + funcname = lua_pushfstring(L, POF"%s", funcname); + lua_remove(L, -2); /* remove 'gsub' result */ + return funcname; +} + + +static int loader_C (lua_State *L) { + const char *funcname; + const char *name = luaL_checkstring(L, 1); + const char *filename = findfile(L, name, "cpath"); + if (filename == NULL) return 1; /* library not found in this path */ + funcname = mkfuncname(L, name); + if (ll_loadfunc(L, filename, funcname) != 0) + loaderror(L, filename); + return 1; /* library loaded successfully */ +} + + +static int loader_Croot (lua_State *L) { + const char *funcname; + const char *filename; + const char *name = luaL_checkstring(L, 1); + const char *p = strchr(name, '.'); + int stat; + if (p == NULL) return 0; /* is root */ + lua_pushlstring(L, name, p - name); + filename = findfile(L, lua_tostring(L, -1), "cpath"); + if (filename == NULL) return 1; /* root not found */ + funcname = mkfuncname(L, name); + if ((stat = ll_loadfunc(L, filename, funcname)) != 0) { + if (stat != ERRFUNC) loaderror(L, filename); /* real error */ + luaO_pushfstring(L, "\n\tno module " LUA_QS " in file " LUA_QS, + name, filename); + return 1; /* function not found */ + } + return 1; 
+} + + +static int loader_preload (lua_State *L) { + const char *name = luaL_checkstring(L, 1); + lua_getfield(L, LUA_ENVIRONINDEX, "preload"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.preload") " must be a table"); + lua_getfield(L, -1, name); + if (lua_isnil(L, -1)) /* not found? */ + luaO_pushfstring(L, "\n\tno field package.preload['%s']", name); + return 1; +} + + +static const int sentinel_ = 0; +#define sentinel ((void *)&sentinel_) + + +static int ll_require (lua_State *L) { + const char *name = luaL_checkstring(L, 1); + int i; + lua_settop(L, 1); /* _LOADED table will be at index 2 */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, 2, name); + if (lua_toboolean(L, -1)) { /* is it there? */ + if (lua_touserdata(L, -1) == sentinel) /* check loops */ + luaL_error(L, "loop or previous error loading module " LUA_QS, name); + return 1; /* package is already loaded */ + } + /* else must load it; iterate over available loaders */ + lua_getfield(L, LUA_ENVIRONINDEX, "loaders"); + if (!lua_istable(L, -1)) + luaL_error(L, LUA_QL("package.loaders") " must be a table"); + lua_pushstring(L, ""); /* error message accumulator */ + for (i=1; ; i++) { + lua_rawgeti(L, -2, i); /* get a loader */ + if (lua_isnil(L, -1)) + luaL_error(L, "module " LUA_QS " not found:%s", + name, lua_tostring(L, -2)); + lua_pushstring(L, name); + lua_call(L, 1, 1); /* call it */ + if (lua_isfunction(L, -1)) /* did it find module? */ + break; /* module loaded successfully */ + else if (lua_isstring(L, -1)) /* loader returned error message? */ + lua_concat(L, 2); /* accumulate it */ + else + lua_pop(L, 1); + } + lua_pushlightuserdata(L, sentinel); + lua_setfield(L, 2, name); /* _LOADED[name] = sentinel */ + lua_pushstring(L, name); /* pass name as argument to module */ + lua_call(L, 1, 1); /* run loaded module */ + if (!lua_isnil(L, -1)) /* non-nil return? */ + lua_setfield(L, 2, name); /* _LOADED[name] = returned value */ + lua_getfield(L, 2, name); + if (lua_touserdata(L, -1) == sentinel) { /* module did not set a value? 
*/ + lua_pushboolean(L, 1); /* use true as result */ + lua_pushvalue(L, -1); /* extra copy to be returned */ + lua_setfield(L, 2, name); /* _LOADED[name] = true */ + } + return 1; +} + +/* }====================================================== */ + + + +/* +** {====================================================== +** 'module' function +** ======================================================= +*/ + + +static void setfenv (lua_State *L) { + lua_Debug ar; + lua_getstack(L, 1, &ar); + lua_getinfo(L, "f", &ar); + lua_pushvalue(L, -2); + lua_setfenv(L, -2); + lua_pop(L, 1); +} + + +static void dooptions (lua_State *L, int n) { + int i; + for (i = 2; i <= n; i++) { + lua_pushvalue(L, i); /* get option (a function) */ + lua_pushvalue(L, -2); /* module */ + lua_call(L, 1, 0); + } +} + + +static void modinit (lua_State *L, const char *modname) { + const char *dot; + lua_pushvalue(L, -1); + lua_setfield(L, -2, "_M"); /* module._M = module */ + lua_pushstring(L, modname); + lua_setfield(L, -2, "_NAME"); + dot = strrchr(modname, '.'); /* look for last dot in module name */ + if (dot == NULL) dot = modname; + else dot++; + /* set _PACKAGE as package name (full module name minus last part) */ + lua_pushlstring(L, modname, dot - modname); + lua_setfield(L, -2, "_PACKAGE"); +} + + +static int ll_module (lua_State *L) { + const char *modname = luaL_checkstring(L, 1); + int loaded = lua_gettop(L) + 1; /* index of _LOADED table */ + lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); + lua_getfield(L, loaded, modname); /* get _LOADED[modname] */ + if (!lua_istable(L, -1)) { /* not found? */ + lua_pop(L, 1); /* remove previous result */ + /* try global variable (and create one if it does not exist) */ + if (luaL_findtable(L, LUA_GLOBALSINDEX, modname, 1) != NULL) + return luaL_error(L, "name conflict for module " LUA_QS, modname); + lua_pushvalue(L, -1); + lua_setfield(L, loaded, modname); /* _LOADED[modname] = new table */ + } + /* check whether table already has a _NAME field */ + lua_getfield(L, -1, "_NAME"); + if (!lua_isnil(L, -1)) /* is table an initialized module? */ + lua_pop(L, 1); + else { /* no; initialize it */ + lua_pop(L, 1); + modinit(L, modname); + } + lua_pushvalue(L, -1); + setfenv(L); + dooptions(L, loaded - 1); + return 0; +} + + +static int ll_seeall (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + if (!lua_getmetatable(L, 1)) { + lua_createtable(L, 0, 1); /* create new metatable */ + lua_pushvalue(L, -1); + lua_setmetatable(L, 1); + } + lua_pushvalue(L, LUA_GLOBALSINDEX); + lua_setfield(L, -2, "__index"); /* mt.__index = _G */ + return 0; +} + + +/* }====================================================== */ + + + +/* auxiliary mark (for internal use) */ +#define AUXMARK "\1" + +static void setpath (lua_State *L, const char *fieldname, const char *envname, + const char *def) { + const char *path = getenv(envname); + if (path == NULL) /* no environment variable? 
*/ + lua_pushstring(L, def); /* use default */ + else { + /* replace ";;" by ";AUXMARK;" and then AUXMARK by default path */ + path = luaL_gsub(L, path, LUA_PATHSEP LUA_PATHSEP, + LUA_PATHSEP AUXMARK LUA_PATHSEP); + luaL_gsub(L, path, AUXMARK, def); + lua_remove(L, -2); + } + setprogdir(L); + lua_setfield(L, -2, fieldname); +} + + +static const luaL_Reg pk_funcs[] = { + {"loadlib", ll_loadlib}, + {"seeall", ll_seeall}, + {NULL, NULL} +}; + + +static const luaL_Reg ll_funcs[] = { + {"module", ll_module}, + {"require", ll_require}, + {NULL, NULL} +}; + + +static const lua_CFunction loaders[] = + {loader_preload, loader_Lua, loader_C, loader_Croot, NULL}; + + +LUALIB_API int luaopen_package (lua_State *L) { + int i; + /* create new type _LOADLIB */ + luaL_newmetatable(L, "_LOADLIB"); + lua_pushcfunction(L, gctm); + lua_setfield(L, -2, "__gc"); + /* create `package' table */ + luaL_register(L, LUA_LOADLIBNAME, pk_funcs); +#if defined(LUA_COMPAT_LOADLIB) + lua_getfield(L, -1, "loadlib"); + lua_setfield(L, LUA_GLOBALSINDEX, "loadlib"); +#endif + lua_pushvalue(L, -1); + lua_replace(L, LUA_ENVIRONINDEX); + /* create `loaders' table */ + lua_createtable(L, 0, sizeof(loaders)/sizeof(loaders[0]) - 1); + /* fill it with pre-defined loaders */ + for (i=0; loaders[i] != NULL; i++) { + lua_pushcfunction(L, loaders[i]); + lua_rawseti(L, -2, i+1); + } + lua_setfield(L, -2, "loaders"); /* put it in field `loaders' */ + setpath(L, "path", LUA_PATH, LUA_PATH_DEFAULT); /* set field `path' */ + setpath(L, "cpath", LUA_CPATH, LUA_CPATH_DEFAULT); /* set field `cpath' */ + /* store config information */ + lua_pushstring(L, LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" + LUA_EXECDIR "\n" LUA_IGMARK); + lua_setfield(L, -2, "config"); + /* set field `loaded' */ + luaL_findtable(L, LUA_REGISTRYINDEX, "_LOADED", 2); + lua_setfield(L, -2, "loaded"); + /* set field `preload' */ + lua_newtable(L); + lua_setfield(L, -2, "preload"); + lua_pushvalue(L, LUA_GLOBALSINDEX); + luaL_register(L, NULL, ll_funcs); /* open lib into global table */ + lua_pop(L, 1); + return 1; /* return 'package' table */ +} + diff --git a/deps/lua/src/lobject.c b/deps/lua/src/lobject.c new file mode 100644 index 0000000000000000000000000000000000000000..acde82ccdf16365bc28b7fb2f202d09fc4a19598 --- /dev/null +++ b/deps/lua/src/lobject.c @@ -0,0 +1,214 @@ +/* +** $Id: lobject.c,v 2.22 2006/02/10 17:43:52 roberto Exp $ +** Some generic functions over Lua objects +** See Copyright Notice in lua.h +*/ + +#include +#include +#include +#include +#include + +#define lobject_c +#define LUA_CORE + +#include "lua.h" + +#include "ldo.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "lvm.h" + + + +const TValue luaO_nilobject_ = {{NULL}, LUA_TNIL}; + + +/* +** converts an integer to a "floating point byte", represented as +** (eeeeexxx), where the real value is (1xxx) * 2^(eeeee - 1) if +** eeeee != 0 and (xxx) otherwise. 
+*/ +int luaO_int2fb (unsigned int x) { + int e = 0; /* expoent */ + while (x >= 16) { + x = (x+1) >> 1; + e++; + } + if (x < 8) return x; + else return ((e+1) << 3) | (cast_int(x) - 8); +} + + +/* converts back */ +int luaO_fb2int (int x) { + int e = (x >> 3) & 31; + if (e == 0) return x; + else return ((x & 7)+8) << (e - 1); +} + + +int luaO_log2 (unsigned int x) { + static const lu_byte log_2[256] = { + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8 + }; + int l = -1; + while (x >= 256) { l += 8; x >>= 8; } + return l + log_2[x]; + +} + + +int luaO_rawequalObj (const TValue *t1, const TValue *t2) { + if (ttype(t1) != ttype(t2)) return 0; + else switch (ttype(t1)) { + case LUA_TNIL: + return 1; + case LUA_TNUMBER: + return luai_numeq(nvalue(t1), nvalue(t2)); + case LUA_TBOOLEAN: + return bvalue(t1) == bvalue(t2); /* boolean true must be 1 !! */ + case LUA_TLIGHTUSERDATA: + return pvalue(t1) == pvalue(t2); + default: + lua_assert(iscollectable(t1)); + return gcvalue(t1) == gcvalue(t2); + } +} + + +int luaO_str2d (const char *s, lua_Number *result) { + char *endptr; + *result = lua_str2number(s, &endptr); + if (endptr == s) return 0; /* conversion failed */ + if (*endptr == 'x' || *endptr == 'X') /* maybe an hexadecimal constant? */ + *result = cast_num(strtoul(s, &endptr, 16)); + if (*endptr == '\0') return 1; /* most common case */ + while (isspace(cast(unsigned char, *endptr))) endptr++; + if (*endptr != '\0') return 0; /* invalid trailing characters? */ + return 1; +} + + + +static void pushstr (lua_State *L, const char *str) { + setsvalue2s(L, L->top, luaS_new(L, str)); + incr_top(L); +} + + +/* this function handles only `%d', `%c', %f, %p, and `%s' formats */ +const char *luaO_pushvfstring (lua_State *L, const char *fmt, va_list argp) { + int n = 1; + pushstr(L, ""); + for (;;) { + const char *e = strchr(fmt, '%'); + if (e == NULL) break; + setsvalue2s(L, L->top, luaS_newlstr(L, fmt, e-fmt)); + incr_top(L); + switch (*(e+1)) { + case 's': { + const char *s = va_arg(argp, char *); + if (s == NULL) s = "(null)"; + pushstr(L, s); + break; + } + case 'c': { + char buff[2]; + buff[0] = cast(char, va_arg(argp, int)); + buff[1] = '\0'; + pushstr(L, buff); + break; + } + case 'd': { + setnvalue(L->top, cast_num(va_arg(argp, int))); + incr_top(L); + break; + } + case 'f': { + setnvalue(L->top, cast_num(va_arg(argp, l_uacNumber))); + incr_top(L); + break; + } + case 'p': { + char buff[4*sizeof(void *) + 8]; /* should be enough space for a `%p' */ + sprintf(buff, "%p", va_arg(argp, void *)); + pushstr(L, buff); + break; + } + case '%': { + pushstr(L, "%"); + break; + } + default: { + char buff[3]; + buff[0] = '%'; + buff[1] = *(e+1); + buff[2] = '\0'; + pushstr(L, buff); + break; + } + } + n += 2; + fmt = e+2; + } + pushstr(L, fmt); + luaV_concat(L, n+1, cast_int(L->top - L->base) - 1); + L->top -= n; + return svalue(L->top - 1); +} + + +const char *luaO_pushfstring (lua_State *L, const char *fmt, ...) 
{ + const char *msg; + va_list argp; + va_start(argp, fmt); + msg = luaO_pushvfstring(L, fmt, argp); + va_end(argp); + return msg; +} + + +void luaO_chunkid (char *out, const char *source, size_t bufflen) { + if (*source == '=') { + strncpy(out, source+1, bufflen); /* remove first char */ + out[bufflen-1] = '\0'; /* ensures null termination */ + } + else { /* out = "source", or "...source" */ + if (*source == '@') { + size_t l; + source++; /* skip the `@' */ + bufflen -= sizeof(" '...' "); + l = strlen(source); + strcpy(out, ""); + if (l > bufflen) { + source += (l-bufflen); /* get last part of file name */ + strcat(out, "..."); + } + strcat(out, source); + } + else { /* out = [string "string"] */ + size_t len = strcspn(source, "\n\r"); /* stop at first newline */ + bufflen -= sizeof(" [string \"...\"] "); + if (len > bufflen) len = bufflen; + strcpy(out, "[string \""); + if (source[len] != '\0') { /* must truncate? */ + strncat(out, source, len); + strcat(out, "..."); + } + else + strcat(out, source); + strcat(out, "\"]"); + } + } +} diff --git a/deps/lua/src/lobject.h b/deps/lua/src/lobject.h new file mode 100644 index 0000000000000000000000000000000000000000..6e53e5a5fe656d740e0b5c3d5b56e5aeacd6abbb --- /dev/null +++ b/deps/lua/src/lobject.h @@ -0,0 +1,381 @@ +/* +** $Id: lobject.h,v 2.20 2006/01/18 11:37:34 roberto Exp $ +** Type definitions for Lua objects +** See Copyright Notice in lua.h +*/ + + +#ifndef lobject_h +#define lobject_h + + +#include +#include + +#include "llimits.h" +#include "lua.h" + + +/* tags for values visible from Lua */ +#define LAST_TAG LUA_TTHREAD + +#define NUM_TAGS (LAST_TAG+1) + + +/* +** Extra tags for non-values +*/ +#define LUA_TPROTO (LAST_TAG+1) +#define LUA_TUPVAL (LAST_TAG+2) +#define LUA_TDEADKEY (LAST_TAG+3) + + +/* +** Union of all collectable objects +*/ +typedef union GCObject GCObject; + + +/* +** Common Header for all collectable objects (in macro form, to be +** included in other objects) +*/ +#define CommonHeader GCObject *next; lu_byte tt; lu_byte marked + + +/* +** Common header in struct form +*/ +typedef struct GCheader { + CommonHeader; +} GCheader; + + + + +/* +** Union of all Lua values +*/ +typedef union { + GCObject *gc; + void *p; + lua_Number n; + int b; +} Value; + + +/* +** Tagged Values +*/ + +#define TValuefields Value value; int tt + +typedef struct lua_TValue { + TValuefields; +} TValue; + + +/* Macros to test type */ +#define ttisnil(o) (ttype(o) == LUA_TNIL) +#define ttisnumber(o) (ttype(o) == LUA_TNUMBER) +#define ttisstring(o) (ttype(o) == LUA_TSTRING) +#define ttistable(o) (ttype(o) == LUA_TTABLE) +#define ttisfunction(o) (ttype(o) == LUA_TFUNCTION) +#define ttisboolean(o) (ttype(o) == LUA_TBOOLEAN) +#define ttisuserdata(o) (ttype(o) == LUA_TUSERDATA) +#define ttisthread(o) (ttype(o) == LUA_TTHREAD) +#define ttislightuserdata(o) (ttype(o) == LUA_TLIGHTUSERDATA) + +/* Macros to access values */ +#define ttype(o) ((o)->tt) +#define gcvalue(o) check_exp(iscollectable(o), (o)->value.gc) +#define pvalue(o) check_exp(ttislightuserdata(o), (o)->value.p) +#define nvalue(o) check_exp(ttisnumber(o), (o)->value.n) +#define rawtsvalue(o) check_exp(ttisstring(o), &(o)->value.gc->ts) +#define tsvalue(o) (&rawtsvalue(o)->tsv) +#define rawuvalue(o) check_exp(ttisuserdata(o), &(o)->value.gc->u) +#define uvalue(o) (&rawuvalue(o)->uv) +#define clvalue(o) check_exp(ttisfunction(o), &(o)->value.gc->cl) +#define hvalue(o) check_exp(ttistable(o), &(o)->value.gc->h) +#define bvalue(o) check_exp(ttisboolean(o), (o)->value.b) +#define thvalue(o) 
check_exp(ttisthread(o), &(o)->value.gc->th) + +#define l_isfalse(o) (ttisnil(o) || (ttisboolean(o) && bvalue(o) == 0)) + +/* +** for internal debug only +*/ +#define checkconsistency(obj) \ + lua_assert(!iscollectable(obj) || (ttype(obj) == (obj)->value.gc->gch.tt)) + +#define checkliveness(g,obj) \ + lua_assert(!iscollectable(obj) || \ + ((ttype(obj) == (obj)->value.gc->gch.tt) && !isdead(g, (obj)->value.gc))) + + +/* Macros to set values */ +#define setnilvalue(obj) ((obj)->tt=LUA_TNIL) + +#define setnvalue(obj,x) \ + { TValue *i_o=(obj); i_o->value.n=(x); i_o->tt=LUA_TNUMBER; } + +#define setpvalue(obj,x) \ + { TValue *i_o=(obj); i_o->value.p=(x); i_o->tt=LUA_TLIGHTUSERDATA; } + +#define setbvalue(obj,x) \ + { TValue *i_o=(obj); i_o->value.b=(x); i_o->tt=LUA_TBOOLEAN; } + +#define setsvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TSTRING; \ + checkliveness(G(L),i_o); } + +#define setuvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TUSERDATA; \ + checkliveness(G(L),i_o); } + +#define setthvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTHREAD; \ + checkliveness(G(L),i_o); } + +#define setclvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TFUNCTION; \ + checkliveness(G(L),i_o); } + +#define sethvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TTABLE; \ + checkliveness(G(L),i_o); } + +#define setptvalue(L,obj,x) \ + { TValue *i_o=(obj); \ + i_o->value.gc=cast(GCObject *, (x)); i_o->tt=LUA_TPROTO; \ + checkliveness(G(L),i_o); } + + + + +#define setobj(L,obj1,obj2) \ + { const TValue *o2=(obj2); TValue *o1=(obj1); \ + o1->value = o2->value; o1->tt=o2->tt; \ + checkliveness(G(L),o1); } + + +/* +** different types of sets, according to destination +*/ + +/* from stack to (same) stack */ +#define setobjs2s setobj +/* to stack (not from same stack) */ +#define setobj2s setobj +#define setsvalue2s setsvalue +#define sethvalue2s sethvalue +#define setptvalue2s setptvalue +/* from table to same table */ +#define setobjt2t setobj +/* to table */ +#define setobj2t setobj +/* to new object */ +#define setobj2n setobj +#define setsvalue2n setsvalue + +#define setttype(obj, tt) (ttype(obj) = (tt)) + + +#define iscollectable(o) (ttype(o) >= LUA_TSTRING) + + + +typedef TValue *StkId; /* index to stack elements */ + + +/* +** String headers for string table +*/ +typedef union TString { + L_Umaxalign dummy; /* ensures maximum alignment for strings */ + struct { + CommonHeader; + lu_byte reserved; + unsigned int hash; + size_t len; + } tsv; +} TString; + + +#define getstr(ts) cast(const char *, (ts) + 1) +#define svalue(o) getstr(tsvalue(o)) + + + +typedef union Udata { + L_Umaxalign dummy; /* ensures maximum alignment for `local' udata */ + struct { + CommonHeader; + struct Table *metatable; + struct Table *env; + size_t len; + } uv; +} Udata; + + + + +/* +** Function Prototypes +*/ +typedef struct Proto { + CommonHeader; + TValue *k; /* constants used by the function */ + Instruction *code; + struct Proto **p; /* functions defined inside the function */ + int *lineinfo; /* map from opcodes to source lines */ + struct LocVar *locvars; /* information about local variables */ + TString **upvalues; /* upvalue names */ + TString *source; + int sizeupvalues; + int sizek; /* size of `k' */ + int sizecode; + int sizelineinfo; + int sizep; /* size of `p' */ + int sizelocvars; + int 
linedefined; + int lastlinedefined; + GCObject *gclist; + lu_byte nups; /* number of upvalues */ + lu_byte numparams; + lu_byte is_vararg; + lu_byte maxstacksize; +} Proto; + + +/* masks for new-style vararg */ +#define VARARG_HASARG 1 +#define VARARG_ISVARARG 2 +#define VARARG_NEEDSARG 4 + + +typedef struct LocVar { + TString *varname; + int startpc; /* first point where variable is active */ + int endpc; /* first point where variable is dead */ +} LocVar; + + + +/* +** Upvalues +*/ + +typedef struct UpVal { + CommonHeader; + TValue *v; /* points to stack or to its own value */ + union { + TValue value; /* the value (when closed) */ + struct { /* double linked list (when open) */ + struct UpVal *prev; + struct UpVal *next; + } l; + } u; +} UpVal; + + +/* +** Closures +*/ + +#define ClosureHeader \ + CommonHeader; lu_byte isC; lu_byte nupvalues; GCObject *gclist; \ + struct Table *env + +typedef struct CClosure { + ClosureHeader; + lua_CFunction f; + TValue upvalue[1]; +} CClosure; + + +typedef struct LClosure { + ClosureHeader; + struct Proto *p; + UpVal *upvals[1]; +} LClosure; + + +typedef union Closure { + CClosure c; + LClosure l; +} Closure; + + +#define iscfunction(o) (ttype(o) == LUA_TFUNCTION && clvalue(o)->c.isC) +#define isLfunction(o) (ttype(o) == LUA_TFUNCTION && !clvalue(o)->c.isC) + + +/* +** Tables +*/ + +typedef union TKey { + struct { + TValuefields; + struct Node *next; /* for chaining */ + } nk; + TValue tvk; +} TKey; + + +typedef struct Node { + TValue i_val; + TKey i_key; +} Node; + + +typedef struct Table { + CommonHeader; + lu_byte flags; /* 1<
<p means tagmethod(p) is not present */
+  lu_byte lsizenode;  /* log2 of size of `node' array */
+  struct Table *metatable;
+  TValue *array;  /* array part */
+  Node *node;
+  Node *lastfree;  /* any free position is before this position */
+  GCObject *gclist;
+  int sizearray;  /* size of `array' array */
+} Table;
+
+
+/*
+** `module' operation for hashing (size is always a power of 2)
+*/
+#define lmod(s,size) \
+	(check_exp((size&(size-1))==0, (cast(int, (s) & ((size)-1)))))
+
+
+#define twoto(x)	(1<<(x))
+#define sizenode(t)	(twoto((t)->
lsizenode))) + + +#define luaO_nilobject (&luaO_nilobject_) + +LUAI_DATA const TValue luaO_nilobject_; + +#define ceillog2(x) (luaO_log2((x)-1) + 1) + +LUAI_FUNC int luaO_log2 (unsigned int x); +LUAI_FUNC int luaO_int2fb (unsigned int x); +LUAI_FUNC int luaO_fb2int (int x); +LUAI_FUNC int luaO_rawequalObj (const TValue *t1, const TValue *t2); +LUAI_FUNC int luaO_str2d (const char *s, lua_Number *result); +LUAI_FUNC const char *luaO_pushvfstring (lua_State *L, const char *fmt, + va_list argp); +LUAI_FUNC const char *luaO_pushfstring (lua_State *L, const char *fmt, ...); +LUAI_FUNC void luaO_chunkid (char *out, const char *source, size_t len); + + +#endif + diff --git a/deps/lua/src/lopcodes.c b/deps/lua/src/lopcodes.c new file mode 100644 index 0000000000000000000000000000000000000000..bf9cd522c260aa7a36ff76e19fc1a330d19b6e9c --- /dev/null +++ b/deps/lua/src/lopcodes.c @@ -0,0 +1,102 @@ +/* +** $Id: lopcodes.c,v 1.37 2005/11/08 19:45:36 roberto Exp $ +** See Copyright Notice in lua.h +*/ + + +#define lopcodes_c +#define LUA_CORE + + +#include "lopcodes.h" + + +/* ORDER OP */ + +const char *const luaP_opnames[NUM_OPCODES+1] = { + "MOVE", + "LOADK", + "LOADBOOL", + "LOADNIL", + "GETUPVAL", + "GETGLOBAL", + "GETTABLE", + "SETGLOBAL", + "SETUPVAL", + "SETTABLE", + "NEWTABLE", + "SELF", + "ADD", + "SUB", + "MUL", + "DIV", + "MOD", + "POW", + "UNM", + "NOT", + "LEN", + "CONCAT", + "JMP", + "EQ", + "LT", + "LE", + "TEST", + "TESTSET", + "CALL", + "TAILCALL", + "RETURN", + "FORLOOP", + "FORPREP", + "TFORLOOP", + "SETLIST", + "CLOSE", + "CLOSURE", + "VARARG", + NULL +}; + + +#define opmode(t,a,b,c,m) (((t)<<7) | ((a)<<6) | ((b)<<4) | ((c)<<2) | (m)) + +const lu_byte luaP_opmodes[NUM_OPCODES] = { +/* T A B C mode opcode */ + opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_MOVE */ + ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_LOADK */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_LOADBOOL */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LOADNIL */ + ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_GETUPVAL */ + ,opmode(0, 1, OpArgK, OpArgN, iABx) /* OP_GETGLOBAL */ + ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_GETTABLE */ + ,opmode(0, 0, OpArgK, OpArgN, iABx) /* OP_SETGLOBAL */ + ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_SETUPVAL */ + ,opmode(0, 0, OpArgK, OpArgK, iABC) /* OP_SETTABLE */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_NEWTABLE */ + ,opmode(0, 1, OpArgR, OpArgK, iABC) /* OP_SELF */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_ADD */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_SUB */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MUL */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_DIV */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_MOD */ + ,opmode(0, 1, OpArgK, OpArgK, iABC) /* OP_POW */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_UNM */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_NOT */ + ,opmode(0, 1, OpArgR, OpArgN, iABC) /* OP_LEN */ + ,opmode(0, 1, OpArgR, OpArgR, iABC) /* OP_CONCAT */ + ,opmode(0, 0, OpArgR, OpArgN, iAsBx) /* OP_JMP */ + ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_EQ */ + ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LT */ + ,opmode(1, 0, OpArgK, OpArgK, iABC) /* OP_LE */ + ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TEST */ + ,opmode(1, 1, OpArgR, OpArgU, iABC) /* OP_TESTSET */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_CALL */ + ,opmode(0, 1, OpArgU, OpArgU, iABC) /* OP_TAILCALL */ + ,opmode(0, 0, OpArgU, OpArgN, iABC) /* OP_RETURN */ + ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORLOOP */ + ,opmode(0, 1, OpArgR, OpArgN, iAsBx) /* OP_FORPREP */ + ,opmode(1, 0, OpArgN, OpArgU, iABC) /* OP_TFORLOOP */ 
+ ,opmode(0, 0, OpArgU, OpArgU, iABC) /* OP_SETLIST */ + ,opmode(0, 0, OpArgN, OpArgN, iABC) /* OP_CLOSE */ + ,opmode(0, 1, OpArgU, OpArgN, iABx) /* OP_CLOSURE */ + ,opmode(0, 1, OpArgU, OpArgN, iABC) /* OP_VARARG */ +}; + diff --git a/deps/lua/src/lopcodes.h b/deps/lua/src/lopcodes.h new file mode 100644 index 0000000000000000000000000000000000000000..2834b1d74dadee7625025aeb6746bfdff404229f --- /dev/null +++ b/deps/lua/src/lopcodes.h @@ -0,0 +1,268 @@ +/* +** $Id: lopcodes.h,v 1.124 2005/12/02 18:42:08 roberto Exp $ +** Opcodes for Lua virtual machine +** See Copyright Notice in lua.h +*/ + +#ifndef lopcodes_h +#define lopcodes_h + +#include "llimits.h" + + +/*=========================================================================== + We assume that instructions are unsigned numbers. + All instructions have an opcode in the first 6 bits. + Instructions can have the following fields: + `A' : 8 bits + `B' : 9 bits + `C' : 9 bits + `Bx' : 18 bits (`B' and `C' together) + `sBx' : signed Bx + + A signed argument is represented in excess K; that is, the number + value is the unsigned value minus K. K is exactly the maximum value + for that argument (so that -max is represented by 0, and +max is + represented by 2*max), which is half the maximum for the corresponding + unsigned argument. +===========================================================================*/ + + +enum OpMode {iABC, iABx, iAsBx}; /* basic instruction format */ + + +/* +** size and position of opcode arguments. +*/ +#define SIZE_C 9 +#define SIZE_B 9 +#define SIZE_Bx (SIZE_C + SIZE_B) +#define SIZE_A 8 + +#define SIZE_OP 6 + +#define POS_OP 0 +#define POS_A (POS_OP + SIZE_OP) +#define POS_C (POS_A + SIZE_A) +#define POS_B (POS_C + SIZE_C) +#define POS_Bx POS_C + + +/* +** limits for opcode arguments. +** we use (signed) int to manipulate most arguments, +** so they must fit in LUAI_BITSINT-1 bits (-1 for sign) +*/ +#if SIZE_Bx < LUAI_BITSINT-1 +#define MAXARG_Bx ((1<>1) /* `sBx' is signed */ +#else +#define MAXARG_Bx MAX_INT +#define MAXARG_sBx MAX_INT +#endif + + +#define MAXARG_A ((1<>POS_OP) & MASK1(SIZE_OP,0))) +#define SET_OPCODE(i,o) ((i) = (((i)&MASK0(SIZE_OP,POS_OP)) | \ + ((cast(Instruction, o)<>POS_A) & MASK1(SIZE_A,0))) +#define SETARG_A(i,u) ((i) = (((i)&MASK0(SIZE_A,POS_A)) | \ + ((cast(Instruction, u)<>POS_B) & MASK1(SIZE_B,0))) +#define SETARG_B(i,b) ((i) = (((i)&MASK0(SIZE_B,POS_B)) | \ + ((cast(Instruction, b)<>POS_C) & MASK1(SIZE_C,0))) +#define SETARG_C(i,b) ((i) = (((i)&MASK0(SIZE_C,POS_C)) | \ + ((cast(Instruction, b)<>POS_Bx) & MASK1(SIZE_Bx,0))) +#define SETARG_Bx(i,b) ((i) = (((i)&MASK0(SIZE_Bx,POS_Bx)) | \ + ((cast(Instruction, b)< C) then pc++ */ +OP_TESTSET,/* A B C if (R(B) <=> C) then R(A) := R(B) else pc++ */ + +OP_CALL,/* A B C R(A), ... ,R(A+C-2) := R(A)(R(A+1), ... ,R(A+B-1)) */ +OP_TAILCALL,/* A B C return R(A)(R(A+1), ... ,R(A+B-1)) */ +OP_RETURN,/* A B return R(A), ... ,R(A+B-2) (see note) */ + +OP_FORLOOP,/* A sBx R(A)+=R(A+2); + if R(A) =) R(A)*/ +OP_CLOSURE,/* A Bx R(A) := closure(KPROTO[Bx], R(A), ... ,R(A+n)) */ + +OP_VARARG/* A B R(A), R(A+1), ..., R(A+B-1) = vararg */ +} OpCode; + + +#define NUM_OPCODES (cast(int, OP_VARARG) + 1) + + + +/*=========================================================================== + Notes: + (*) In OP_CALL, if (B == 0) then B = top. C is the number of returns - 1, + and can be 0: OP_CALL then sets `top' to last_result+1, so + next open instruction (OP_CALL, OP_RETURN, OP_SETLIST) may use `top'. 
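As a rough, self-contained sketch of the instruction layout described above (6-bit opcode at bit 0, 8-bit A at bit 6, 18-bit Bx starting at bit 14 for iABx), the helper below shows how one iABx instruction is packed and unpacked by hand; it is illustrative only and not part of the Lua sources:

#include <stdio.h>

typedef unsigned int Instruction;   /* assumes a 32-bit unsigned int, as Lua does */

/* pack an iABx instruction: opcode in bits 0-5, A in bits 6-13, Bx in bits 14-31 */
static Instruction create_iABx(unsigned op, unsigned a, unsigned bx) {
  return (Instruction)(op | (a << 6) | (bx << 14));
}

int main(void) {
  /* OP_LOADK is opcode 1 (second entry of the OpCode enum above) */
  Instruction i = create_iABx(1, 0, 5);   /* LOADK: R(0) := K(5)  -> 81921 */
  unsigned op = i & 0x3F;                 /* like GET_OPCODE(i)   -> 1 */
  unsigned a  = (i >> 6) & 0xFF;          /* like GETARG_A(i)     -> 0 */
  unsigned bx = (i >> 14) & 0x3FFFF;      /* like GETARG_Bx(i)    -> 5 */
  printf("op=%u A=%u Bx=%u raw=%u\n", op, a, bx, i);
  return 0;
}

The real GET_OPCODE/GETARG_* and SETARG_* macros use the same bit positions; the setters additionally mask out the old field before OR-ing in the new value.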
+ + (*) In OP_VARARG, if (B == 0) then use actual number of varargs and + set top (like in OP_CALL with C == 0). + + (*) In OP_RETURN, if (B == 0) then return up to `top' + + (*) In OP_SETLIST, if (B == 0) then B = `top'; + if (C == 0) then next `instruction' is real C + + (*) For comparisons, A specifies what condition the test should accept + (true or false). + + (*) All `skips' (pc++) assume that next instruction is a jump +===========================================================================*/ + + +/* +** masks for instruction properties. The format is: +** bits 0-1: op mode +** bits 2-3: C arg mode +** bits 4-5: B arg mode +** bit 6: instruction set register A +** bit 7: operator is a test +*/ + +enum OpArgMask { + OpArgN, /* argument is not used */ + OpArgU, /* argument is used */ + OpArgR, /* argument is a register or a jump offset */ + OpArgK /* argument is a constant or register/constant */ +}; + +LUAI_DATA const lu_byte luaP_opmodes[NUM_OPCODES]; + +#define getOpMode(m) (cast(enum OpMode, luaP_opmodes[m] & 3)) +#define getBMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 4) & 3)) +#define getCMode(m) (cast(enum OpArgMask, (luaP_opmodes[m] >> 2) & 3)) +#define testAMode(m) (luaP_opmodes[m] & (1 << 6)) +#define testTMode(m) (luaP_opmodes[m] & (1 << 7)) + + +LUAI_DATA const char *const luaP_opnames[NUM_OPCODES+1]; /* opcode names */ + + +/* number of list items to accumulate before a SETLIST instruction */ +#define LFIELDS_PER_FLUSH 50 + + +#endif diff --git a/deps/lua/src/loslib.c b/deps/lua/src/loslib.c new file mode 100644 index 0000000000000000000000000000000000000000..509d7b72e49dffc89dc4f62417d9350d0a7d5c8a --- /dev/null +++ b/deps/lua/src/loslib.c @@ -0,0 +1,238 @@ +/* +** $Id: loslib.c,v 1.17 2006/01/27 13:54:31 roberto Exp $ +** Standard Operating System library +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include +#include + +#define loslib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +static int os_pushresult (lua_State *L, int i, const char *filename) { + int en = errno; /* calls to Lua API may change this value */ + if (i) { + lua_pushboolean(L, 1); + return 1; + } + else { + lua_pushnil(L); + if (filename) + lua_pushfstring(L, "%s: %s", filename, strerror(en)); + else + lua_pushfstring(L, "%s", strerror(en)); + lua_pushinteger(L, en); + return 3; + } +} + + +static int os_execute (lua_State *L) { + lua_pushinteger(L, system(luaL_optstring(L, 1, NULL))); + return 1; +} + + +static int os_remove (lua_State *L) { + const char *filename = luaL_checkstring(L, 1); + return os_pushresult(L, remove(filename) == 0, filename); +} + + +static int os_rename (lua_State *L) { + const char *fromname = luaL_checkstring(L, 1); + const char *toname = luaL_checkstring(L, 2); + return os_pushresult(L, rename(fromname, toname) == 0, fromname); +} + + +static int os_tmpname (lua_State *L) { + char buff[LUA_TMPNAMBUFSIZE]; + int err; + lua_tmpnam(buff, err); + if (err) + return luaL_error(L, "unable to generate a unique filename"); + lua_pushstring(L, buff); + return 1; +} + + +static int os_getenv (lua_State *L) { + lua_pushstring(L, getenv(luaL_checkstring(L, 1))); /* if NULL push nil */ + return 1; +} + + +static int os_clock (lua_State *L) { + lua_pushnumber(L, ((lua_Number)clock())/(lua_Number)CLOCKS_PER_SEC); + return 1; +} + + +/* +** {====================================================== +** Time/Date operations +** { year=%Y, month=%m, day=%d, hour=%H, min=%M, sec=%S, +** wday=%w+1, yday=%j, isdst=? 
} +** ======================================================= +*/ + +static void setfield (lua_State *L, const char *key, int value) { + lua_pushinteger(L, value); + lua_setfield(L, -2, key); +} + +static void setboolfield (lua_State *L, const char *key, int value) { + if (value < 0) /* undefined? */ + return; /* does not set field */ + lua_pushboolean(L, value); + lua_setfield(L, -2, key); +} + +static int getboolfield (lua_State *L, const char *key) { + int res; + lua_getfield(L, -1, key); + res = lua_isnil(L, -1) ? -1 : lua_toboolean(L, -1); + lua_pop(L, 1); + return res; +} + + +static int getfield (lua_State *L, const char *key, int d) { + int res; + lua_getfield(L, -1, key); + if (lua_isnumber(L, -1)) + res = (int)lua_tointeger(L, -1); + else { + if (d < 0) + return luaL_error(L, "field " LUA_QS " missing in date table", key); + res = d; + } + lua_pop(L, 1); + return res; +} + + +static int os_date (lua_State *L) { + const char *s = luaL_optstring(L, 1, "%c"); + time_t t = lua_isnoneornil(L, 2) ? time(NULL) : + (time_t)luaL_checknumber(L, 2); + struct tm *stm; + if (*s == '!') { /* UTC? */ + stm = gmtime(&t); + s++; /* skip `!' */ + } + else + stm = localtime(&t); + if (stm == NULL) /* invalid date? */ + lua_pushnil(L); + else if (strcmp(s, "*t") == 0) { + lua_createtable(L, 0, 9); /* 9 = number of fields */ + setfield(L, "sec", stm->tm_sec); + setfield(L, "min", stm->tm_min); + setfield(L, "hour", stm->tm_hour); + setfield(L, "day", stm->tm_mday); + setfield(L, "month", stm->tm_mon+1); + setfield(L, "year", stm->tm_year+1900); + setfield(L, "wday", stm->tm_wday+1); + setfield(L, "yday", stm->tm_yday+1); + setboolfield(L, "isdst", stm->tm_isdst); + } + else { + char b[256]; + if (strftime(b, sizeof(b), s, stm)) + lua_pushstring(L, b); + else + return luaL_error(L, LUA_QL("date") " format too long"); + } + return 1; +} + + +static int os_time (lua_State *L) { + time_t t; + if (lua_isnoneornil(L, 1)) /* called without args? 
*/ + t = time(NULL); /* get current time */ + else { + struct tm ts; + luaL_checktype(L, 1, LUA_TTABLE); + lua_settop(L, 1); /* make sure table is at the top */ + ts.tm_sec = getfield(L, "sec", 0); + ts.tm_min = getfield(L, "min", 0); + ts.tm_hour = getfield(L, "hour", 12); + ts.tm_mday = getfield(L, "day", -1); + ts.tm_mon = getfield(L, "month", -1) - 1; + ts.tm_year = getfield(L, "year", -1) - 1900; + ts.tm_isdst = getboolfield(L, "isdst"); + t = mktime(&ts); + } + if (t == (time_t)(-1)) + lua_pushnil(L); + else + lua_pushnumber(L, (lua_Number)t); + return 1; +} + + +static int os_difftime (lua_State *L) { + lua_pushnumber(L, difftime((time_t)(luaL_checknumber(L, 1)), + (time_t)(luaL_optnumber(L, 2, 0)))); + return 1; +} + +/* }====================================================== */ + + +static int os_setlocale (lua_State *L) { + static const int cat[] = {LC_ALL, LC_COLLATE, LC_CTYPE, LC_MONETARY, + LC_NUMERIC, LC_TIME}; + static const char *const catnames[] = {"all", "collate", "ctype", "monetary", + "numeric", "time", NULL}; + const char *l = lua_tostring(L, 1); + int op = luaL_checkoption(L, 2, "all", catnames); + luaL_argcheck(L, l || lua_isnoneornil(L, 1), 1, "string expected"); + lua_pushstring(L, setlocale(cat[op], l)); + return 1; +} + + +static int os_exit (lua_State *L) { + exit(luaL_optint(L, 1, EXIT_SUCCESS)); + return 0; /* to avoid warnings */ +} + +static const luaL_Reg syslib[] = { + {"clock", os_clock}, + {"date", os_date}, + {"difftime", os_difftime}, + {"execute", os_execute}, + {"exit", os_exit}, + {"getenv", os_getenv}, + {"remove", os_remove}, + {"rename", os_rename}, + {"setlocale", os_setlocale}, + {"time", os_time}, + {"tmpname", os_tmpname}, + {NULL, NULL} +}; + +/* }====================================================== */ + + + +LUALIB_API int luaopen_os (lua_State *L) { + luaL_register(L, LUA_OSLIBNAME, syslib); + return 1; +} + diff --git a/deps/lua/src/lparser.c b/deps/lua/src/lparser.c new file mode 100644 index 0000000000000000000000000000000000000000..b40ee794fe785d60578b49c8471e23c590703126 --- /dev/null +++ b/deps/lua/src/lparser.c @@ -0,0 +1,1336 @@ +/* +** $Id: lparser.c,v 2.40 2005/12/22 16:19:56 roberto Exp $ +** Lua Parser +** See Copyright Notice in lua.h +*/ + + +#include + +#define lparser_c +#define LUA_CORE + +#include "lua.h" + +#include "lcode.h" +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "llex.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lparser.h" +#include "lstate.h" +#include "lstring.h" + + + + +#define hasmultret(k) ((k) == VCALL || (k) == VVARARG) + +#define getlocvar(fs, i) ((fs)->f->locvars[(fs)->actvar[i]]) + +#define luaY_checklimit(fs,v,l,m) if ((v)>(l)) errorlimit(fs,l,m) + + +/* +** nodes for block list (list of active blocks) +*/ +typedef struct BlockCnt { + struct BlockCnt *previous; /* chain */ + int breaklist; /* list of jumps out of this loop */ + lu_byte nactvar; /* # active locals outside the breakable structure */ + lu_byte upval; /* true if some variable in the block is an upvalue */ + lu_byte isbreakable; /* true if `block' is a loop */ +} BlockCnt; + + + +/* +** prototypes for recursive non-terminal functions +*/ +static void chunk (LexState *ls); +static void expr (LexState *ls, expdesc *v); + + +static void anchor_token (LexState *ls) { + if (ls->t.token == TK_NAME || ls->t.token == TK_STRING) { + TString *ts = ls->t.seminfo.ts; + luaX_newstring(ls, getstr(ts), ts->tsv.len); + } +} + + +static void error_expected (LexState *ls, int token) { + 
luaX_syntaxerror(ls, + luaO_pushfstring(ls->L, LUA_QS " expected", luaX_token2str(ls, token))); +} + + +static void errorlimit (FuncState *fs, int limit, const char *what) { + const char *msg = (fs->f->linedefined == 0) ? + luaO_pushfstring(fs->L, "main function has more than %d %s", limit, what) : + luaO_pushfstring(fs->L, "function at line %d has more than %d %s", + fs->f->linedefined, limit, what); + luaX_lexerror(fs->ls, msg, 0); +} + + +static int testnext (LexState *ls, int c) { + if (ls->t.token == c) { + luaX_next(ls); + return 1; + } + else return 0; +} + + +static void check (LexState *ls, int c) { + if (ls->t.token != c) + error_expected(ls, c); +} + +static void checknext (LexState *ls, int c) { + check(ls, c); + luaX_next(ls); +} + + +#define check_condition(ls,c,msg) { if (!(c)) luaX_syntaxerror(ls, msg); } + + + +static void check_match (LexState *ls, int what, int who, int where) { + if (!testnext(ls, what)) { + if (where == ls->linenumber) + error_expected(ls, what); + else { + luaX_syntaxerror(ls, luaO_pushfstring(ls->L, + LUA_QS " expected (to close " LUA_QS " at line %d)", + luaX_token2str(ls, what), luaX_token2str(ls, who), where)); + } + } +} + + +static TString *str_checkname (LexState *ls) { + TString *ts; + check(ls, TK_NAME); + ts = ls->t.seminfo.ts; + luaX_next(ls); + return ts; +} + + +static void init_exp (expdesc *e, expkind k, int i) { + e->f = e->t = NO_JUMP; + e->k = k; + e->u.s.info = i; +} + + +static void codestring (LexState *ls, expdesc *e, TString *s) { + init_exp(e, VK, luaK_stringK(ls->fs, s)); +} + + +static void checkname(LexState *ls, expdesc *e) { + codestring(ls, e, str_checkname(ls)); +} + + +static int registerlocalvar (LexState *ls, TString *varname) { + FuncState *fs = ls->fs; + Proto *f = fs->f; + int oldsize = f->sizelocvars; + luaM_growvector(ls->L, f->locvars, fs->nlocvars, f->sizelocvars, + LocVar, SHRT_MAX, "too many local variables"); + while (oldsize < f->sizelocvars) f->locvars[oldsize++].varname = NULL; + f->locvars[fs->nlocvars].varname = varname; + luaC_objbarrier(ls->L, f, varname); + return fs->nlocvars++; +} + + +#define new_localvarliteral(ls,v,n) \ + new_localvar(ls, luaX_newstring(ls, "" v, (sizeof(v)/sizeof(char))-1), n) + + +static void new_localvar (LexState *ls, TString *name, int n) { + FuncState *fs = ls->fs; + luaY_checklimit(fs, fs->nactvar+n+1, LUAI_MAXVARS, "local variables"); + fs->actvar[fs->nactvar+n] = cast(unsigned short, registerlocalvar(ls, name)); +} + + +static void adjustlocalvars (LexState *ls, int nvars) { + FuncState *fs = ls->fs; + fs->nactvar = cast_byte(fs->nactvar + nvars); + for (; nvars; nvars--) { + getlocvar(fs, fs->nactvar - nvars).startpc = fs->pc; + } +} + + +static void removevars (LexState *ls, int tolevel) { + FuncState *fs = ls->fs; + while (fs->nactvar > tolevel) + getlocvar(fs, --fs->nactvar).endpc = fs->pc; +} + + +static int indexupvalue (FuncState *fs, TString *name, expdesc *v) { + int i; + Proto *f = fs->f; + int oldsize = f->sizeupvalues; + for (i=0; inups; i++) { + if (fs->upvalues[i].k == v->k && fs->upvalues[i].info == v->u.s.info) { + lua_assert(f->upvalues[i] == name); + return i; + } + } + /* new one */ + luaY_checklimit(fs, f->nups + 1, LUAI_MAXUPVALUES, "upvalues"); + luaM_growvector(fs->L, f->upvalues, f->nups, f->sizeupvalues, + TString *, MAX_INT, ""); + while (oldsize < f->sizeupvalues) f->upvalues[oldsize++] = NULL; + f->upvalues[f->nups] = name; + luaC_objbarrier(fs->L, f, name); + lua_assert(v->k == VLOCAL || v->k == VUPVAL); + fs->upvalues[f->nups].k = 
cast_byte(v->k); + fs->upvalues[f->nups].info = cast_byte(v->u.s.info); + return f->nups++; +} + + +static int searchvar (FuncState *fs, TString *n) { + int i; + for (i=fs->nactvar-1; i >= 0; i--) { + if (n == getlocvar(fs, i).varname) + return i; + } + return -1; /* not found */ +} + + +static void markupval (FuncState *fs, int level) { + BlockCnt *bl = fs->bl; + while (bl && bl->nactvar > level) bl = bl->previous; + if (bl) bl->upval = 1; +} + + +static int singlevaraux (FuncState *fs, TString *n, expdesc *var, int base) { + if (fs == NULL) { /* no more levels? */ + init_exp(var, VGLOBAL, NO_REG); /* default is global variable */ + return VGLOBAL; + } + else { + int v = searchvar(fs, n); /* look up at current level */ + if (v >= 0) { + init_exp(var, VLOCAL, v); + if (!base) + markupval(fs, v); /* local will be used as an upval */ + return VLOCAL; + } + else { /* not found at current level; try upper one */ + if (singlevaraux(fs->prev, n, var, 0) == VGLOBAL) + return VGLOBAL; + var->u.s.info = indexupvalue(fs, n, var); /* else was LOCAL or UPVAL */ + var->k = VUPVAL; /* upvalue in this level */ + return VUPVAL; + } + } +} + + +static void singlevar (LexState *ls, expdesc *var) { + TString *varname = str_checkname(ls); + FuncState *fs = ls->fs; + if (singlevaraux(fs, varname, var, 1) == VGLOBAL) + var->u.s.info = luaK_stringK(fs, varname); /* info points to global name */ +} + + +static void adjust_assign (LexState *ls, int nvars, int nexps, expdesc *e) { + FuncState *fs = ls->fs; + int extra = nvars - nexps; + if (hasmultret(e->k)) { + extra++; /* includes call itself */ + if (extra < 0) extra = 0; + luaK_setreturns(fs, e, extra); /* last exp. provides the difference */ + if (extra > 1) luaK_reserveregs(fs, extra-1); + } + else { + if (e->k != VVOID) luaK_exp2nextreg(fs, e); /* close last expression */ + if (extra > 0) { + int reg = fs->freereg; + luaK_reserveregs(fs, extra); + luaK_nil(fs, reg, extra); + } + } +} + + +static void enterlevel (LexState *ls) { + if (++ls->L->nCcalls > LUAI_MAXCCALLS) + luaX_lexerror(ls, "chunk has too many syntax levels", 0); +} + + +#define leavelevel(ls) ((ls)->L->nCcalls--) + + +static void enterblock (FuncState *fs, BlockCnt *bl, lu_byte isbreakable) { + bl->breaklist = NO_JUMP; + bl->isbreakable = isbreakable; + bl->nactvar = fs->nactvar; + bl->upval = 0; + bl->previous = fs->bl; + fs->bl = bl; + lua_assert(fs->freereg == fs->nactvar); +} + + +static void leaveblock (FuncState *fs) { + BlockCnt *bl = fs->bl; + fs->bl = bl->previous; + removevars(fs->ls, bl->nactvar); + if (bl->upval) + luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0); + lua_assert(!bl->isbreakable || !bl->upval); /* loops have no body */ + lua_assert(bl->nactvar == fs->nactvar); + fs->freereg = fs->nactvar; /* free registers */ + luaK_patchtohere(fs, bl->breaklist); +} + + +static void pushclosure (LexState *ls, FuncState *func, expdesc *v) { + FuncState *fs = ls->fs; + Proto *f = fs->f; + int oldsize = f->sizep; + int i; + luaM_growvector(ls->L, f->p, fs->np, f->sizep, Proto *, + MAXARG_Bx, "constant table overflow"); + while (oldsize < f->sizep) f->p[oldsize++] = NULL; + f->p[fs->np++] = func->f; + luaC_objbarrier(ls->L, f, func->f); + init_exp(v, VRELOCABLE, luaK_codeABx(fs, OP_CLOSURE, 0, fs->np-1)); + for (i=0; if->nups; i++) { + OpCode o = (func->upvalues[i].k == VLOCAL) ? 
OP_MOVE : OP_GETUPVAL; + luaK_codeABC(fs, o, 0, func->upvalues[i].info, 0); + } +} + + +static void open_func (LexState *ls, FuncState *fs) { + lua_State *L = ls->L; + Proto *f = luaF_newproto(L); + fs->f = f; + fs->prev = ls->fs; /* linked list of funcstates */ + fs->ls = ls; + fs->L = L; + ls->fs = fs; + fs->pc = 0; + fs->lasttarget = -1; + fs->jpc = NO_JUMP; + fs->freereg = 0; + fs->nk = 0; + fs->np = 0; + fs->nlocvars = 0; + fs->nactvar = 0; + fs->bl = NULL; + f->source = ls->source; + f->maxstacksize = 2; /* registers 0/1 are always valid */ + fs->h = luaH_new(L, 0, 0); + /* anchor table of constants and prototype (to avoid being collected) */ + sethvalue2s(L, L->top, fs->h); + incr_top(L); + setptvalue2s(L, L->top, f); + incr_top(L); +} + + +static void close_func (LexState *ls) { + lua_State *L = ls->L; + FuncState *fs = ls->fs; + Proto *f = fs->f; + removevars(ls, 0); + luaK_ret(fs, 0, 0); /* final return */ + luaM_reallocvector(L, f->code, f->sizecode, fs->pc, Instruction); + f->sizecode = fs->pc; + luaM_reallocvector(L, f->lineinfo, f->sizelineinfo, fs->pc, int); + f->sizelineinfo = fs->pc; + luaM_reallocvector(L, f->k, f->sizek, fs->nk, TValue); + f->sizek = fs->nk; + luaM_reallocvector(L, f->p, f->sizep, fs->np, Proto *); + f->sizep = fs->np; + luaM_reallocvector(L, f->locvars, f->sizelocvars, fs->nlocvars, LocVar); + f->sizelocvars = fs->nlocvars; + luaM_reallocvector(L, f->upvalues, f->sizeupvalues, f->nups, TString *); + f->sizeupvalues = f->nups; + lua_assert(luaG_checkcode(f)); + lua_assert(fs->bl == NULL); + ls->fs = fs->prev; + L->top -= 2; /* remove table and prototype from the stack */ + /* last token read was anchored in defunct function; must reanchor it */ + if (fs) anchor_token(ls); +} + + +Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, const char *name) { + struct LexState lexstate; + struct FuncState funcstate; + lexstate.buff = buff; + luaX_setinput(L, &lexstate, z, luaS_new(L, name)); + open_func(&lexstate, &funcstate); + funcstate.f->is_vararg = VARARG_ISVARARG; /* main func. is always vararg */ + luaX_next(&lexstate); /* read first token */ + chunk(&lexstate); + check(&lexstate, TK_EOS); + close_func(&lexstate); + lua_assert(funcstate.prev == NULL); + lua_assert(funcstate.f->nups == 0); + lua_assert(lexstate.fs == NULL); + return funcstate.f; +} + + + +/*============================================================*/ +/* GRAMMAR RULES */ +/*============================================================*/ + + +static void field (LexState *ls, expdesc *v) { + /* field -> ['.' 
| ':'] NAME */ + FuncState *fs = ls->fs; + expdesc key; + luaK_exp2anyreg(fs, v); + luaX_next(ls); /* skip the dot or colon */ + checkname(ls, &key); + luaK_indexed(fs, v, &key); +} + + +static void yindex (LexState *ls, expdesc *v) { + /* index -> '[' expr ']' */ + luaX_next(ls); /* skip the '[' */ + expr(ls, v); + luaK_exp2val(ls->fs, v); + checknext(ls, ']'); +} + + +/* +** {====================================================================== +** Rules for Constructors +** ======================================================================= +*/ + + +struct ConsControl { + expdesc v; /* last list item read */ + expdesc *t; /* table descriptor */ + int nh; /* total number of `record' elements */ + int na; /* total number of array elements */ + int tostore; /* number of array elements pending to be stored */ +}; + + +static void recfield (LexState *ls, struct ConsControl *cc) { + /* recfield -> (NAME | `['exp1`]') = exp1 */ + FuncState *fs = ls->fs; + int reg = ls->fs->freereg; + expdesc key, val; + if (ls->t.token == TK_NAME) { + luaY_checklimit(fs, cc->nh, MAX_INT, "items in a constructor"); + checkname(ls, &key); + } + else /* ls->t.token == '[' */ + yindex(ls, &key); + cc->nh++; + checknext(ls, '='); + luaK_exp2RK(fs, &key); + expr(ls, &val); + luaK_codeABC(fs, OP_SETTABLE, cc->t->u.s.info, luaK_exp2RK(fs, &key), + luaK_exp2RK(fs, &val)); + fs->freereg = reg; /* free registers */ +} + + +static void closelistfield (FuncState *fs, struct ConsControl *cc) { + if (cc->v.k == VVOID) return; /* there is no list item */ + luaK_exp2nextreg(fs, &cc->v); + cc->v.k = VVOID; + if (cc->tostore == LFIELDS_PER_FLUSH) { + luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore); /* flush */ + cc->tostore = 0; /* no more items pending */ + } +} + + +static void lastlistfield (FuncState *fs, struct ConsControl *cc) { + if (cc->tostore == 0) return; + if (hasmultret(cc->v.k)) { + luaK_setmultret(fs, &cc->v); + luaK_setlist(fs, cc->t->u.s.info, cc->na, LUA_MULTRET); + cc->na--; /* do not count last expression (unknown number of elements) */ + } + else { + if (cc->v.k != VVOID) + luaK_exp2nextreg(fs, &cc->v); + luaK_setlist(fs, cc->t->u.s.info, cc->na, cc->tostore); + } +} + + +static void listfield (LexState *ls, struct ConsControl *cc) { + expr(ls, &cc->v); + luaY_checklimit(ls->fs, cc->na, MAXARG_Bx, "items in a constructor"); + cc->na++; + cc->tostore++; +} + + +static void constructor (LexState *ls, expdesc *t) { + /* constructor -> ?? */ + FuncState *fs = ls->fs; + int line = ls->linenumber; + int pc = luaK_codeABC(fs, OP_NEWTABLE, 0, 0, 0); + struct ConsControl cc; + cc.na = cc.nh = cc.tostore = 0; + cc.t = t; + init_exp(t, VRELOCABLE, pc); + init_exp(&cc.v, VVOID, 0); /* no value (yet) */ + luaK_exp2nextreg(ls->fs, t); /* fix it at stack top (for gc) */ + checknext(ls, '{'); + do { + lua_assert(cc.v.k == VVOID || cc.tostore > 0); + if (ls->t.token == '}') break; + closelistfield(fs, &cc); + switch(ls->t.token) { + case TK_NAME: { /* may be listfields or recfields */ + luaX_lookahead(ls); + if (ls->lookahead.token != '=') /* expression? 
*/ + listfield(ls, &cc); + else + recfield(ls, &cc); + break; + } + case '[': { /* constructor_item -> recfield */ + recfield(ls, &cc); + break; + } + default: { /* constructor_part -> listfield */ + listfield(ls, &cc); + break; + } + } + } while (testnext(ls, ',') || testnext(ls, ';')); + check_match(ls, '}', '{', line); + lastlistfield(fs, &cc); + SETARG_B(fs->f->code[pc], luaO_int2fb(cc.na)); /* set initial array size */ + SETARG_C(fs->f->code[pc], luaO_int2fb(cc.nh)); /* set initial table size */ +} + +/* }====================================================================== */ + + + +static void parlist (LexState *ls) { + /* parlist -> [ param { `,' param } ] */ + FuncState *fs = ls->fs; + Proto *f = fs->f; + int nparams = 0; + f->is_vararg = 0; + if (ls->t.token != ')') { /* is `parlist' not empty? */ + do { + switch (ls->t.token) { + case TK_NAME: { /* param -> NAME */ + new_localvar(ls, str_checkname(ls), nparams++); + break; + } + case TK_DOTS: { /* param -> `...' */ + luaX_next(ls); +#if defined(LUA_COMPAT_VARARG) + /* use `arg' as default name */ + new_localvarliteral(ls, "arg", nparams++); + f->is_vararg = VARARG_HASARG | VARARG_NEEDSARG; +#endif + f->is_vararg |= VARARG_ISVARARG; + break; + } + default: luaX_syntaxerror(ls, " or " LUA_QL("...") " expected"); + } + } while (!f->is_vararg && testnext(ls, ',')); + } + adjustlocalvars(ls, nparams); + f->numparams = cast_byte(fs->nactvar - (f->is_vararg & VARARG_HASARG)); + luaK_reserveregs(fs, fs->nactvar); /* reserve register for parameters */ +} + + +static void body (LexState *ls, expdesc *e, int needself, int line) { + /* body -> `(' parlist `)' chunk END */ + FuncState new_fs; + open_func(ls, &new_fs); + new_fs.f->linedefined = line; + checknext(ls, '('); + if (needself) { + new_localvarliteral(ls, "self", 0); + adjustlocalvars(ls, 1); + } + parlist(ls); + checknext(ls, ')'); + chunk(ls); + new_fs.f->lastlinedefined = ls->linenumber; + check_match(ls, TK_END, TK_FUNCTION, line); + close_func(ls); + pushclosure(ls, &new_fs, e); +} + + +static int explist1 (LexState *ls, expdesc *v) { + /* explist1 -> expr { `,' expr } */ + int n = 1; /* at least one expression */ + expr(ls, v); + while (testnext(ls, ',')) { + luaK_exp2nextreg(ls->fs, v); + expr(ls, v); + n++; + } + return n; +} + + +static void funcargs (LexState *ls, expdesc *f) { + FuncState *fs = ls->fs; + expdesc args; + int base, nparams; + int line = ls->linenumber; + switch (ls->t.token) { + case '(': { /* funcargs -> `(' [ explist1 ] `)' */ + if (line != ls->lastline) + luaX_syntaxerror(ls,"ambiguous syntax (function call x new statement)"); + luaX_next(ls); + if (ls->t.token == ')') /* arg list is empty? 
*/ + args.k = VVOID; + else { + explist1(ls, &args); + luaK_setmultret(fs, &args); + } + check_match(ls, ')', '(', line); + break; + } + case '{': { /* funcargs -> constructor */ + constructor(ls, &args); + break; + } + case TK_STRING: { /* funcargs -> STRING */ + codestring(ls, &args, ls->t.seminfo.ts); + luaX_next(ls); /* must use `seminfo' before `next' */ + break; + } + default: { + luaX_syntaxerror(ls, "function arguments expected"); + return; + } + } + lua_assert(f->k == VNONRELOC); + base = f->u.s.info; /* base register for call */ + if (hasmultret(args.k)) + nparams = LUA_MULTRET; /* open call */ + else { + if (args.k != VVOID) + luaK_exp2nextreg(fs, &args); /* close last argument */ + nparams = fs->freereg - (base+1); + } + init_exp(f, VCALL, luaK_codeABC(fs, OP_CALL, base, nparams+1, 2)); + luaK_fixline(fs, line); + fs->freereg = base+1; /* call remove function and arguments and leaves + (unless changed) one result */ +} + + + + +/* +** {====================================================================== +** Expression parsing +** ======================================================================= +*/ + + +static void prefixexp (LexState *ls, expdesc *v) { + /* prefixexp -> NAME | '(' expr ')' */ + switch (ls->t.token) { + case '(': { + int line = ls->linenumber; + luaX_next(ls); + expr(ls, v); + check_match(ls, ')', '(', line); + luaK_dischargevars(ls->fs, v); + return; + } + case TK_NAME: { + singlevar(ls, v); + return; + } + default: { + luaX_syntaxerror(ls, "unexpected symbol"); + return; + } + } +} + + +static void primaryexp (LexState *ls, expdesc *v) { + /* primaryexp -> + prefixexp { `.' NAME | `[' exp `]' | `:' NAME funcargs | funcargs } */ + FuncState *fs = ls->fs; + prefixexp(ls, v); + for (;;) { + switch (ls->t.token) { + case '.': { /* field */ + field(ls, v); + break; + } + case '[': { /* `[' exp1 `]' */ + expdesc key; + luaK_exp2anyreg(fs, v); + yindex(ls, &key); + luaK_indexed(fs, v, &key); + break; + } + case ':': { /* `:' NAME funcargs */ + expdesc key; + luaX_next(ls); + checkname(ls, &key); + luaK_self(fs, v, &key); + funcargs(ls, v); + break; + } + case '(': case TK_STRING: case '{': { /* funcargs */ + luaK_exp2nextreg(fs, v); + funcargs(ls, v); + break; + } + default: return; + } + } +} + + +static void simpleexp (LexState *ls, expdesc *v) { + /* simpleexp -> NUMBER | STRING | NIL | true | false | ... 
| + constructor | FUNCTION body | primaryexp */ + switch (ls->t.token) { + case TK_NUMBER: { + init_exp(v, VKNUM, 0); + v->u.nval = ls->t.seminfo.r; + break; + } + case TK_STRING: { + codestring(ls, v, ls->t.seminfo.ts); + break; + } + case TK_NIL: { + init_exp(v, VNIL, 0); + break; + } + case TK_TRUE: { + init_exp(v, VTRUE, 0); + break; + } + case TK_FALSE: { + init_exp(v, VFALSE, 0); + break; + } + case TK_DOTS: { /* vararg */ + FuncState *fs = ls->fs; + check_condition(ls, fs->f->is_vararg, + "cannot use " LUA_QL("...") " outside a vararg function"); + fs->f->is_vararg &= ~VARARG_NEEDSARG; /* don't need 'arg' */ + init_exp(v, VVARARG, luaK_codeABC(fs, OP_VARARG, 0, 1, 0)); + break; + } + case '{': { /* constructor */ + constructor(ls, v); + return; + } + case TK_FUNCTION: { + luaX_next(ls); + body(ls, v, 0, ls->linenumber); + return; + } + default: { + primaryexp(ls, v); + return; + } + } + luaX_next(ls); +} + + +static UnOpr getunopr (int op) { + switch (op) { + case TK_NOT: return OPR_NOT; + case '-': return OPR_MINUS; + case '#': return OPR_LEN; + default: return OPR_NOUNOPR; + } +} + + +static BinOpr getbinopr (int op) { + switch (op) { + case '+': return OPR_ADD; + case '-': return OPR_SUB; + case '*': return OPR_MUL; + case '/': return OPR_DIV; + case '%': return OPR_MOD; + case '^': return OPR_POW; + case TK_CONCAT: return OPR_CONCAT; + case TK_NE: return OPR_NE; + case TK_EQ: return OPR_EQ; + case '<': return OPR_LT; + case TK_LE: return OPR_LE; + case '>': return OPR_GT; + case TK_GE: return OPR_GE; + case TK_AND: return OPR_AND; + case TK_OR: return OPR_OR; + default: return OPR_NOBINOPR; + } +} + + +static const struct { + lu_byte left; /* left priority for each binary operator */ + lu_byte right; /* right priority */ +} priority[] = { /* ORDER OPR */ + {6, 6}, {6, 6}, {7, 7}, {7, 7}, {7, 7}, /* `+' `-' `/' `%' */ + {10, 9}, {5, 4}, /* power and concat (right associative) */ + {3, 3}, {3, 3}, /* equality and inequality */ + {3, 3}, {3, 3}, {3, 3}, {3, 3}, /* order */ + {2, 2}, {1, 1} /* logical (and/or) */ +}; + +#define UNARY_PRIORITY 8 /* priority for unary operators */ + + +/* +** subexpr -> (simpleexp | unop subexpr) { binop subexpr } +** where `binop' is any binary operator with a priority higher than `limit' +*/ +static BinOpr subexpr (LexState *ls, expdesc *v, unsigned int limit) { + BinOpr op; + UnOpr uop; + enterlevel(ls); + uop = getunopr(ls->t.token); + if (uop != OPR_NOUNOPR) { + luaX_next(ls); + subexpr(ls, v, UNARY_PRIORITY); + luaK_prefix(ls->fs, uop, v); + } + else simpleexp(ls, v); + /* expand while operators have priorities higher than `limit' */ + op = getbinopr(ls->t.token); + while (op != OPR_NOBINOPR && priority[op].left > limit) { + expdesc v2; + BinOpr nextop; + luaX_next(ls); + luaK_infix(ls->fs, op, v); + /* read sub-expression with higher priority */ + nextop = subexpr(ls, &v2, priority[op].right); + luaK_posfix(ls->fs, op, v, &v2); + op = nextop; + } + leavelevel(ls); + return op; /* return first untreated operator */ +} + + +static void expr (LexState *ls, expdesc *v) { + subexpr(ls, v, 0); +} + +/* }==================================================================== */ + + + +/* +** {====================================================================== +** Rules for Statements +** ======================================================================= +*/ + + +static int block_follow (int token) { + switch (token) { + case TK_ELSE: case TK_ELSEIF: case TK_END: + case TK_UNTIL: case TK_EOS: + return 1; + default: return 0; + } +} + + +static 
void block (LexState *ls) { + /* block -> chunk */ + FuncState *fs = ls->fs; + BlockCnt bl; + enterblock(fs, &bl, 0); + chunk(ls); + lua_assert(bl.breaklist == NO_JUMP); + leaveblock(fs); +} + + +/* +** structure to chain all variables in the left-hand side of an +** assignment +*/ +struct LHS_assign { + struct LHS_assign *prev; + expdesc v; /* variable (global, local, upvalue, or indexed) */ +}; + + +/* +** check whether, in an assignment to a local variable, the local variable +** is needed in a previous assignment (to a table). If so, save original +** local value in a safe place and use this safe copy in the previous +** assignment. +*/ +static void check_conflict (LexState *ls, struct LHS_assign *lh, expdesc *v) { + FuncState *fs = ls->fs; + int extra = fs->freereg; /* eventual position to save local variable */ + int conflict = 0; + for (; lh; lh = lh->prev) { + if (lh->v.k == VINDEXED) { + if (lh->v.u.s.info == v->u.s.info) { /* conflict? */ + conflict = 1; + lh->v.u.s.info = extra; /* previous assignment will use safe copy */ + } + if (lh->v.u.s.aux == v->u.s.info) { /* conflict? */ + conflict = 1; + lh->v.u.s.aux = extra; /* previous assignment will use safe copy */ + } + } + } + if (conflict) { + luaK_codeABC(fs, OP_MOVE, fs->freereg, v->u.s.info, 0); /* make copy */ + luaK_reserveregs(fs, 1); + } +} + + +static void assignment (LexState *ls, struct LHS_assign *lh, int nvars) { + expdesc e; + check_condition(ls, VLOCAL <= lh->v.k && lh->v.k <= VINDEXED, + "syntax error"); + if (testnext(ls, ',')) { /* assignment -> `,' primaryexp assignment */ + struct LHS_assign nv; + nv.prev = lh; + primaryexp(ls, &nv.v); + if (nv.v.k == VLOCAL) + check_conflict(ls, lh, &nv.v); + assignment(ls, &nv, nvars+1); + } + else { /* assignment -> `=' explist1 */ + int nexps; + checknext(ls, '='); + nexps = explist1(ls, &e); + if (nexps != nvars) { + adjust_assign(ls, nvars, nexps, &e); + if (nexps > nvars) + ls->fs->freereg -= nexps - nvars; /* remove extra values */ + } + else { + luaK_setoneret(ls->fs, &e); /* close last expression */ + luaK_storevar(ls->fs, &lh->v, &e); + return; /* avoid default */ + } + } + init_exp(&e, VNONRELOC, ls->fs->freereg-1); /* default assignment */ + luaK_storevar(ls->fs, &lh->v, &e); +} + + +static int cond (LexState *ls) { + /* cond -> exp */ + expdesc v; + expr(ls, &v); /* read condition */ + if (v.k == VNIL) v.k = VFALSE; /* `falses' are all equal here */ + luaK_goiftrue(ls->fs, &v); + return v.f; +} + + +static void breakstat (LexState *ls) { + FuncState *fs = ls->fs; + BlockCnt *bl = fs->bl; + int upval = 0; + while (bl && !bl->isbreakable) { + upval |= bl->upval; + bl = bl->previous; + } + if (!bl) + luaX_syntaxerror(ls, "no loop to break"); + if (upval) + luaK_codeABC(fs, OP_CLOSE, bl->nactvar, 0, 0); + luaK_concat(fs, &bl->breaklist, luaK_jump(fs)); +} + + +static void whilestat (LexState *ls, int line) { + /* whilestat -> WHILE cond DO block END */ + FuncState *fs = ls->fs; + int whileinit; + int condexit; + BlockCnt bl; + luaX_next(ls); /* skip WHILE */ + whileinit = luaK_getlabel(fs); + condexit = cond(ls); + enterblock(fs, &bl, 1); + checknext(ls, TK_DO); + block(ls); + luaK_patchlist(fs, luaK_jump(fs), whileinit); + check_match(ls, TK_END, TK_WHILE, line); + leaveblock(fs); + luaK_patchtohere(fs, condexit); /* false conditions finish the loop */ +} + + +static void repeatstat (LexState *ls, int line) { + /* repeatstat -> REPEAT block UNTIL cond */ + int condexit; + FuncState *fs = ls->fs; + int repeat_init = luaK_getlabel(fs); + BlockCnt bl1, bl2; + 
enterblock(fs, &bl1, 1); /* loop block */ + enterblock(fs, &bl2, 0); /* scope block */ + luaX_next(ls); /* skip REPEAT */ + chunk(ls); + check_match(ls, TK_UNTIL, TK_REPEAT, line); + condexit = cond(ls); /* read condition (inside scope block) */ + if (!bl2.upval) { /* no upvalues? */ + leaveblock(fs); /* finish scope */ + luaK_patchlist(ls->fs, condexit, repeat_init); /* close the loop */ + } + else { /* complete semantics when there are upvalues */ + breakstat(ls); /* if condition then break */ + luaK_patchtohere(ls->fs, condexit); /* else... */ + leaveblock(fs); /* finish scope... */ + luaK_patchlist(ls->fs, luaK_jump(fs), repeat_init); /* and repeat */ + } + leaveblock(fs); /* finish loop */ +} + + +static int exp1 (LexState *ls) { + expdesc e; + int k; + expr(ls, &e); + k = e.k; + luaK_exp2nextreg(ls->fs, &e); + return k; +} + + +static void forbody (LexState *ls, int base, int line, int nvars, int isnum) { + /* forbody -> DO block */ + BlockCnt bl; + FuncState *fs = ls->fs; + int prep, endfor; + adjustlocalvars(ls, 3); /* control variables */ + checknext(ls, TK_DO); + prep = isnum ? luaK_codeAsBx(fs, OP_FORPREP, base, NO_JUMP) : luaK_jump(fs); + enterblock(fs, &bl, 0); /* scope for declared variables */ + adjustlocalvars(ls, nvars); + luaK_reserveregs(fs, nvars); + block(ls); + leaveblock(fs); /* end of scope for declared variables */ + luaK_patchtohere(fs, prep); + endfor = (isnum) ? luaK_codeAsBx(fs, OP_FORLOOP, base, NO_JUMP) : + luaK_codeABC(fs, OP_TFORLOOP, base, 0, nvars); + luaK_fixline(fs, line); /* pretend that `OP_FOR' starts the loop */ + luaK_patchlist(fs, (isnum ? endfor : luaK_jump(fs)), prep + 1); +} + + +static void fornum (LexState *ls, TString *varname, int line) { + /* fornum -> NAME = exp1,exp1[,exp1] forbody */ + FuncState *fs = ls->fs; + int base = fs->freereg; + new_localvarliteral(ls, "(for index)", 0); + new_localvarliteral(ls, "(for limit)", 1); + new_localvarliteral(ls, "(for step)", 2); + new_localvar(ls, varname, 3); + checknext(ls, '='); + exp1(ls); /* initial value */ + checknext(ls, ','); + exp1(ls); /* limit */ + if (testnext(ls, ',')) + exp1(ls); /* optional step */ + else { /* default step = 1 */ + luaK_codeABx(fs, OP_LOADK, fs->freereg, luaK_numberK(fs, 1)); + luaK_reserveregs(fs, 1); + } + forbody(ls, base, line, 1, 1); +} + + +static void forlist (LexState *ls, TString *indexname) { + /* forlist -> NAME {,NAME} IN explist1 forbody */ + FuncState *fs = ls->fs; + expdesc e; + int nvars = 0; + int line; + int base = fs->freereg; + /* create control variables */ + new_localvarliteral(ls, "(for generator)", nvars++); + new_localvarliteral(ls, "(for state)", nvars++); + new_localvarliteral(ls, "(for control)", nvars++); + /* create declared variables */ + new_localvar(ls, indexname, nvars++); + while (testnext(ls, ',')) + new_localvar(ls, str_checkname(ls), nvars++); + checknext(ls, TK_IN); + line = ls->linenumber; + adjust_assign(ls, 3, explist1(ls, &e), &e); + luaK_checkstack(fs, 3); /* extra space to call generator */ + forbody(ls, base, line, nvars - 3, 0); +} + + +static void forstat (LexState *ls, int line) { + /* forstat -> FOR (fornum | forlist) END */ + FuncState *fs = ls->fs; + TString *varname; + BlockCnt bl; + enterblock(fs, &bl, 1); /* scope for loop and control variables */ + luaX_next(ls); /* skip `for' */ + varname = str_checkname(ls); /* first variable name */ + switch (ls->t.token) { + case '=': fornum(ls, varname, line); break; + case ',': case TK_IN: forlist(ls, varname); break; + default: luaX_syntaxerror(ls, LUA_QL("=") " or " 
LUA_QL("in") " expected"); + } + check_match(ls, TK_END, TK_FOR, line); + leaveblock(fs); /* loop scope (`break' jumps to this point) */ +} + + +static int test_then_block (LexState *ls) { + /* test_then_block -> [IF | ELSEIF] cond THEN block */ + int condexit; + luaX_next(ls); /* skip IF or ELSEIF */ + condexit = cond(ls); + checknext(ls, TK_THEN); + block(ls); /* `then' part */ + return condexit; +} + + +static void ifstat (LexState *ls, int line) { + /* ifstat -> IF cond THEN block {ELSEIF cond THEN block} [ELSE block] END */ + FuncState *fs = ls->fs; + int flist; + int escapelist = NO_JUMP; + flist = test_then_block(ls); /* IF cond THEN block */ + while (ls->t.token == TK_ELSEIF) { + luaK_concat(fs, &escapelist, luaK_jump(fs)); + luaK_patchtohere(fs, flist); + flist = test_then_block(ls); /* ELSEIF cond THEN block */ + } + if (ls->t.token == TK_ELSE) { + luaK_concat(fs, &escapelist, luaK_jump(fs)); + luaK_patchtohere(fs, flist); + luaX_next(ls); /* skip ELSE (after patch, for correct line info) */ + block(ls); /* `else' part */ + } + else + luaK_concat(fs, &escapelist, flist); + luaK_patchtohere(fs, escapelist); + check_match(ls, TK_END, TK_IF, line); +} + + +static void localfunc (LexState *ls) { + expdesc v, b; + FuncState *fs = ls->fs; + new_localvar(ls, str_checkname(ls), 0); + init_exp(&v, VLOCAL, fs->freereg); + luaK_reserveregs(fs, 1); + adjustlocalvars(ls, 1); + body(ls, &b, 0, ls->linenumber); + luaK_storevar(fs, &v, &b); + /* debug information will only see the variable after this point! */ + getlocvar(fs, fs->nactvar - 1).startpc = fs->pc; +} + + +static void localstat (LexState *ls) { + /* stat -> LOCAL NAME {`,' NAME} [`=' explist1] */ + int nvars = 0; + int nexps; + expdesc e; + do { + new_localvar(ls, str_checkname(ls), nvars++); + } while (testnext(ls, ',')); + if (testnext(ls, '=')) + nexps = explist1(ls, &e); + else { + e.k = VVOID; + nexps = 0; + } + adjust_assign(ls, nvars, nexps, &e); + adjustlocalvars(ls, nvars); +} + + +static int funcname (LexState *ls, expdesc *v) { + /* funcname -> NAME {field} [`:' NAME] */ + int needself = 0; + singlevar(ls, v); + while (ls->t.token == '.') + field(ls, v); + if (ls->t.token == ':') { + needself = 1; + field(ls, v); + } + return needself; +} + + +static void funcstat (LexState *ls, int line) { + /* funcstat -> FUNCTION funcname body */ + int needself; + expdesc v, b; + luaX_next(ls); /* skip FUNCTION */ + needself = funcname(ls, &v); + body(ls, &b, needself, line); + luaK_storevar(ls->fs, &v, &b); + luaK_fixline(ls->fs, line); /* definition `happens' in the first line */ +} + + +static void exprstat (LexState *ls) { + /* stat -> func | assignment */ + FuncState *fs = ls->fs; + struct LHS_assign v; + primaryexp(ls, &v.v); + if (v.v.k == VCALL) /* stat -> func */ + SETARG_C(getcode(fs, &v.v), 1); /* call statement uses no results */ + else { /* stat -> assignment */ + v.prev = NULL; + assignment(ls, &v, 1); + } +} + + +static void retstat (LexState *ls) { + /* stat -> RETURN explist */ + FuncState *fs = ls->fs; + expdesc e; + int first, nret; /* registers with returned values */ + luaX_next(ls); /* skip RETURN */ + if (block_follow(ls->t.token) || ls->t.token == ';') + first = nret = 0; /* return no values */ + else { + nret = explist1(ls, &e); /* optional return values */ + if (hasmultret(e.k)) { + luaK_setmultret(fs, &e); + if (e.k == VCALL && nret == 1) { /* tail call? 
*/ + SET_OPCODE(getcode(fs,&e), OP_TAILCALL); + lua_assert(GETARG_A(getcode(fs,&e)) == fs->nactvar); + } + first = fs->nactvar; + nret = LUA_MULTRET; /* return all values */ + } + else { + if (nret == 1) /* only one single value? */ + first = luaK_exp2anyreg(fs, &e); + else { + luaK_exp2nextreg(fs, &e); /* values must go to the `stack' */ + first = fs->nactvar; /* return all `active' values */ + lua_assert(nret == fs->freereg - first); + } + } + } + luaK_ret(fs, first, nret); +} + + +static int statement (LexState *ls) { + int line = ls->linenumber; /* may be needed for error messages */ + switch (ls->t.token) { + case TK_IF: { /* stat -> ifstat */ + ifstat(ls, line); + return 0; + } + case TK_WHILE: { /* stat -> whilestat */ + whilestat(ls, line); + return 0; + } + case TK_DO: { /* stat -> DO block END */ + luaX_next(ls); /* skip DO */ + block(ls); + check_match(ls, TK_END, TK_DO, line); + return 0; + } + case TK_FOR: { /* stat -> forstat */ + forstat(ls, line); + return 0; + } + case TK_REPEAT: { /* stat -> repeatstat */ + repeatstat(ls, line); + return 0; + } + case TK_FUNCTION: { + funcstat(ls, line); /* stat -> funcstat */ + return 0; + } + case TK_LOCAL: { /* stat -> localstat */ + luaX_next(ls); /* skip LOCAL */ + if (testnext(ls, TK_FUNCTION)) /* local function? */ + localfunc(ls); + else + localstat(ls); + return 0; + } + case TK_RETURN: { /* stat -> retstat */ + retstat(ls); + return 1; /* must be last statement */ + } + case TK_BREAK: { /* stat -> breakstat */ + luaX_next(ls); /* skip BREAK */ + breakstat(ls); + return 1; /* must be last statement */ + } + default: { + exprstat(ls); + return 0; /* to avoid warnings */ + } + } +} + + +static void chunk (LexState *ls) { + /* chunk -> { stat [`;'] } */ + int islast = 0; + enterlevel(ls); + while (!islast && !block_follow(ls->t.token)) { + islast = statement(ls); + testnext(ls, ';'); + lua_assert(ls->fs->f->maxstacksize >= ls->fs->freereg && + ls->fs->freereg >= ls->fs->nactvar); + ls->fs->freereg = ls->fs->nactvar; /* free registers */ + } + leavelevel(ls); +} + +/* }====================================================================== */ diff --git a/deps/lua/src/lparser.h b/deps/lua/src/lparser.h new file mode 100644 index 0000000000000000000000000000000000000000..d5e6e81d0d4bba0728a00cf6a82a388e339dc8b2 --- /dev/null +++ b/deps/lua/src/lparser.h @@ -0,0 +1,83 @@ +/* +** $Id: lparser.h,v 1.56 2005/10/03 14:02:40 roberto Exp $ +** Lua Parser +** See Copyright Notice in lua.h +*/ + +#ifndef lparser_h +#define lparser_h + +#include "llimits.h" +#include "lobject.h" +#include "ltable.h" +#include "lzio.h" + + +/* +** Expression descriptor +*/ + +typedef enum { + VVOID, /* no value */ + VNIL, + VTRUE, + VFALSE, + VK, /* info = index of constant in `k' */ + VKNUM, /* nval = numerical value */ + VLOCAL, /* info = local register */ + VUPVAL, /* info = index of upvalue in `upvalues' */ + VGLOBAL, /* info = index of table; aux = index of global name in `k' */ + VINDEXED, /* info = table register; aux = index register (or `k') */ + VJMP, /* info = instruction pc */ + VRELOCABLE, /* info = instruction pc */ + VNONRELOC, /* info = result register */ + VCALL, /* info = instruction pc */ + VVARARG /* info = instruction pc */ +} expkind; + +typedef struct expdesc { + expkind k; + union { + struct { int info, aux; } s; + lua_Number nval; + } u; + int t; /* patch list of `exit when true' */ + int f; /* patch list of `exit when false' */ +} expdesc; + + +typedef struct upvaldesc { + lu_byte k; + lu_byte info; +} upvaldesc; + + +struct BlockCnt; /* 
defined in lparser.c */ + + +/* state needed to generate code for a given function */ +typedef struct FuncState { + Proto *f; /* current function header */ + Table *h; /* table to find (and reuse) elements in `k' */ + struct FuncState *prev; /* enclosing function */ + struct LexState *ls; /* lexical state */ + struct lua_State *L; /* copy of the Lua state */ + struct BlockCnt *bl; /* chain of current blocks */ + int pc; /* next position to code (equivalent to `ncode') */ + int lasttarget; /* `pc' of last `jump target' */ + int jpc; /* list of pending jumps to `pc' */ + int freereg; /* first free register */ + int nk; /* number of elements in `k' */ + int np; /* number of elements in `p' */ + short nlocvars; /* number of elements in `locvars' */ + lu_byte nactvar; /* number of active local variables */ + upvaldesc upvalues[LUAI_MAXUPVALUES]; /* upvalues */ + unsigned short actvar[LUAI_MAXVARS]; /* declared-variable stack */ +} FuncState; + + +LUAI_FUNC Proto *luaY_parser (lua_State *L, ZIO *z, Mbuffer *buff, + const char *name); + + +#endif diff --git a/deps/lua/src/lstate.c b/deps/lua/src/lstate.c new file mode 100644 index 0000000000000000000000000000000000000000..77e93fbdfa056a05c5de3b9ad09dee4e3e2e46f7 --- /dev/null +++ b/deps/lua/src/lstate.c @@ -0,0 +1,214 @@ +/* +** $Id: lstate.c,v 2.35 2005/10/06 20:46:25 roberto Exp $ +** Global State +** See Copyright Notice in lua.h +*/ + + +#include + +#define lstate_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "llex.h" +#include "lmem.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" + + +#define state_size(x) (sizeof(x) + LUAI_EXTRASPACE) +#define fromstate(l) (cast(lu_byte *, (l)) - LUAI_EXTRASPACE) +#define tostate(l) (cast(lua_State *, cast(lu_byte *, l) + LUAI_EXTRASPACE)) + + +/* +** Main thread combines a thread state and the global state +*/ +typedef struct LG { + lua_State l; + global_State g; +} LG; + + + +static void stack_init (lua_State *L1, lua_State *L) { + /* initialize CallInfo array */ + L1->base_ci = luaM_newvector(L, BASIC_CI_SIZE, CallInfo); + L1->ci = L1->base_ci; + L1->size_ci = BASIC_CI_SIZE; + L1->end_ci = L1->base_ci + L1->size_ci - 1; + /* initialize stack array */ + L1->stack = luaM_newvector(L, BASIC_STACK_SIZE + EXTRA_STACK, TValue); + L1->stacksize = BASIC_STACK_SIZE + EXTRA_STACK; + L1->top = L1->stack; + L1->stack_last = L1->stack+(L1->stacksize - EXTRA_STACK)-1; + /* initialize first ci */ + L1->ci->func = L1->top; + setnilvalue(L1->top++); /* `function' entry for this `ci' */ + L1->base = L1->ci->base = L1->top; + L1->ci->top = L1->top + LUA_MINSTACK; +} + + +static void freestack (lua_State *L, lua_State *L1) { + luaM_freearray(L, L1->base_ci, L1->size_ci, CallInfo); + luaM_freearray(L, L1->stack, L1->stacksize, TValue); +} + + +/* +** open parts that may cause memory-allocation errors +*/ +static void f_luaopen (lua_State *L, void *ud) { + global_State *g = G(L); + UNUSED(ud); + stack_init(L, L); /* init stack */ + sethvalue(L, gt(L), luaH_new(L, 0, 2)); /* table of globals */ + sethvalue(L, registry(L), luaH_new(L, 0, 2)); /* registry */ + luaS_resize(L, MINSTRTABSIZE); /* initial size of string table */ + luaT_init(L); + luaX_init(L); + luaS_fix(luaS_newliteral(L, MEMERRMSG)); + g->GCthreshold = 4*g->totalbytes; +} + + +static void preinit_state (lua_State *L, global_State *g) { + G(L) = g; + L->stack = NULL; + L->stacksize = 0; + L->errorJmp = NULL; + L->hook = NULL; + L->hookmask = 0; + 
L->basehookcount = 0; + L->allowhook = 1; + resethookcount(L); + L->openupval = NULL; + L->size_ci = 0; + L->nCcalls = 0; + L->status = 0; + L->base_ci = L->ci = NULL; + L->savedpc = NULL; + L->errfunc = 0; + setnilvalue(gt(L)); +} + + +static void close_state (lua_State *L) { + global_State *g = G(L); + luaF_close(L, L->stack); /* close all upvalues for this thread */ + luaC_freeall(L); /* collect all objects */ + lua_assert(g->rootgc == obj2gco(L)); + lua_assert(g->strt.nuse == 0); + luaM_freearray(L, G(L)->strt.hash, G(L)->strt.size, TString *); + luaZ_freebuffer(L, &g->buff); + freestack(L, L); + lua_assert(g->totalbytes == sizeof(LG)); + (*g->frealloc)(g->ud, fromstate(L), state_size(LG), 0); +} + + +lua_State *luaE_newthread (lua_State *L) { + lua_State *L1 = tostate(luaM_malloc(L, state_size(lua_State))); + luaC_link(L, obj2gco(L1), LUA_TTHREAD); + preinit_state(L1, G(L)); + stack_init(L1, L); /* init stack */ + setobj2n(L, gt(L1), gt(L)); /* share table of globals */ + L1->hookmask = L->hookmask; + L1->basehookcount = L->basehookcount; + L1->hook = L->hook; + resethookcount(L1); + lua_assert(iswhite(obj2gco(L1))); + return L1; +} + + +void luaE_freethread (lua_State *L, lua_State *L1) { + luaF_close(L1, L1->stack); /* close all upvalues for this thread */ + lua_assert(L1->openupval == NULL); + luai_userstatefree(L1); + freestack(L, L1); + luaM_freemem(L, fromstate(L1), state_size(lua_State)); +} + + +LUA_API lua_State *lua_newstate (lua_Alloc f, void *ud) { + int i; + lua_State *L; + global_State *g; + void *l = (*f)(ud, NULL, 0, state_size(LG)); + if (l == NULL) return NULL; + L = tostate(l); + g = &((LG *)L)->g; + L->next = NULL; + L->tt = LUA_TTHREAD; + g->currentwhite = bit2mask(WHITE0BIT, FIXEDBIT); + L->marked = luaC_white(g); + set2bits(L->marked, FIXEDBIT, SFIXEDBIT); + preinit_state(L, g); + g->frealloc = f; + g->ud = ud; + g->mainthread = L; + g->uvhead.u.l.prev = &g->uvhead; + g->uvhead.u.l.next = &g->uvhead; + g->GCthreshold = 0; /* mark it as unfinished state */ + g->strt.size = 0; + g->strt.nuse = 0; + g->strt.hash = NULL; + setnilvalue(registry(L)); + luaZ_initbuffer(L, &g->buff); + g->panic = NULL; + g->gcstate = GCSpause; + g->rootgc = obj2gco(L); + g->sweepstrgc = 0; + g->sweepgc = &g->rootgc; + g->gray = NULL; + g->grayagain = NULL; + g->weak = NULL; + g->tmudata = NULL; + g->totalbytes = sizeof(LG); + g->gcpause = LUAI_GCPAUSE; + g->gcstepmul = LUAI_GCMUL; + g->gcdept = 0; + for (i=0; imt[i] = NULL; + if (luaD_rawrunprotected(L, f_luaopen, NULL) != 0) { + /* memory allocation error: free partial state */ + close_state(L); + L = NULL; + } + else + luai_userstateopen(L); + return L; +} + + +static void callallgcTM (lua_State *L, void *ud) { + UNUSED(ud); + luaC_callGCTM(L); /* call GC metamethods for all udata */ +} + + +LUA_API void lua_close (lua_State *L) { + L = G(L)->mainthread; /* only the main thread can be closed */ + luai_userstateclose(L); + lua_lock(L); + luaF_close(L, L->stack); /* close all upvalues for this thread */ + luaC_separateudata(L, 1); /* separate udata that have GC metamethods */ + L->errfunc = 0; /* no error function during GC metamethods */ + do { /* repeat until no more errors */ + L->ci = L->base_ci; + L->base = L->top = L->ci->base; + L->nCcalls = 0; + } while (luaD_rawrunprotected(L, callallgcTM, NULL) != 0); + lua_assert(G(L)->tmudata == NULL); + close_state(L); +} + diff --git a/deps/lua/src/lstate.h b/deps/lua/src/lstate.h new file mode 100644 index 0000000000000000000000000000000000000000..d296a4cab99e1cb1ec5f09bc0cd1bfc74e1cd5cc 
--- /dev/null +++ b/deps/lua/src/lstate.h @@ -0,0 +1,168 @@ +/* +** $Id: lstate.h,v 2.24 2006/02/06 18:27:59 roberto Exp $ +** Global State +** See Copyright Notice in lua.h +*/ + +#ifndef lstate_h +#define lstate_h + +#include "lua.h" + +#include "lobject.h" +#include "ltm.h" +#include "lzio.h" + + + +struct lua_longjmp; /* defined in ldo.c */ + + +/* table of globals */ +#define gt(L) (&L->l_gt) + +/* registry */ +#define registry(L) (&G(L)->l_registry) + + +/* extra stack space to handle TM calls and some other extras */ +#define EXTRA_STACK 5 + + +#define BASIC_CI_SIZE 8 + +#define BASIC_STACK_SIZE (2*LUA_MINSTACK) + + + +typedef struct stringtable { + GCObject **hash; + lu_int32 nuse; /* number of elements */ + int size; +} stringtable; + + +/* +** informations about a call +*/ +typedef struct CallInfo { + StkId base; /* base for this function */ + StkId func; /* function index in the stack */ + StkId top; /* top for this function */ + const Instruction *savedpc; + int nresults; /* expected number of results from this function */ + int tailcalls; /* number of tail calls lost under this entry */ +} CallInfo; + + + +#define curr_func(L) (clvalue(L->ci->func)) +#define ci_func(ci) (clvalue((ci)->func)) +#define f_isLua(ci) (!ci_func(ci)->c.isC) +#define isLua(ci) (ttisfunction((ci)->func) && f_isLua(ci)) + + +/* +** `global state', shared by all threads of this state +*/ +typedef struct global_State { + stringtable strt; /* hash table for strings */ + lua_Alloc frealloc; /* function to reallocate memory */ + void *ud; /* auxiliary data to `frealloc' */ + lu_byte currentwhite; + lu_byte gcstate; /* state of garbage collector */ + int sweepstrgc; /* position of sweep in `strt' */ + GCObject *rootgc; /* list of all collectable objects */ + GCObject **sweepgc; /* position of sweep in `rootgc' */ + GCObject *gray; /* list of gray objects */ + GCObject *grayagain; /* list of objects to be traversed atomically */ + GCObject *weak; /* list of weak tables (to be cleared) */ + GCObject *tmudata; /* last element of list of userdata to be GC */ + Mbuffer buff; /* temporary buffer for string concatentation */ + lu_mem GCthreshold; + lu_mem totalbytes; /* number of bytes currently allocated */ + lu_mem estimate; /* an estimate of number of bytes actually in use */ + lu_mem gcdept; /* how much GC is `behind schedule' */ + int gcpause; /* size of pause between successive GCs */ + int gcstepmul; /* GC `granularity' */ + lua_CFunction panic; /* to be called in unprotected errors */ + TValue l_registry; + struct lua_State *mainthread; + UpVal uvhead; /* head of double-linked list of all open upvalues */ + struct Table *mt[NUM_TAGS]; /* metatables for basic types */ + TString *tmname[TM_N]; /* array with tag-method names */ +} global_State; + + +/* +** `per thread' state +*/ +struct lua_State { + CommonHeader; + lu_byte status; + StkId top; /* first free slot in the stack */ + StkId base; /* base of current function */ + global_State *l_G; + CallInfo *ci; /* call info for current function */ + const Instruction *savedpc; /* `savedpc' of current function */ + StkId stack_last; /* last free slot in the stack */ + StkId stack; /* stack base */ + CallInfo *end_ci; /* points after end of ci array*/ + CallInfo *base_ci; /* array of CallInfo's */ + int stacksize; + int size_ci; /* size of array `base_ci' */ + unsigned short nCcalls; /* number of nested C calls */ + lu_byte hookmask; + lu_byte allowhook; + int basehookcount; + int hookcount; + lua_Hook hook; + TValue l_gt; /* table of globals */ + TValue env; /* 
temporary place for environments */ + GCObject *openupval; /* list of open upvalues in this stack */ + GCObject *gclist; + struct lua_longjmp *errorJmp; /* current error recover point */ + ptrdiff_t errfunc; /* current error handling function (stack index) */ +}; + + +#define G(L) (L->l_G) + + +/* +** Union of all collectable objects +*/ +union GCObject { + GCheader gch; + union TString ts; + union Udata u; + union Closure cl; + struct Table h; + struct Proto p; + struct UpVal uv; + struct lua_State th; /* thread */ +}; + + +/* macros to convert a GCObject into a specific value */ +#define rawgco2ts(o) check_exp((o)->gch.tt == LUA_TSTRING, &((o)->ts)) +#define gco2ts(o) (&rawgco2ts(o)->tsv) +#define rawgco2u(o) check_exp((o)->gch.tt == LUA_TUSERDATA, &((o)->u)) +#define gco2u(o) (&rawgco2u(o)->uv) +#define gco2cl(o) check_exp((o)->gch.tt == LUA_TFUNCTION, &((o)->cl)) +#define gco2h(o) check_exp((o)->gch.tt == LUA_TTABLE, &((o)->h)) +#define gco2p(o) check_exp((o)->gch.tt == LUA_TPROTO, &((o)->p)) +#define gco2uv(o) check_exp((o)->gch.tt == LUA_TUPVAL, &((o)->uv)) +#define ngcotouv(o) \ + check_exp((o) == NULL || (o)->gch.tt == LUA_TUPVAL, &((o)->uv)) +#define gco2th(o) check_exp((o)->gch.tt == LUA_TTHREAD, &((o)->th)) + +/* macro to convert any Lua object into a GCObject */ +#define obj2gco(v) (cast(GCObject *, (v))) + + +LUAI_FUNC lua_State *luaE_newthread (lua_State *L); +LUAI_FUNC void luaE_freethread (lua_State *L, lua_State *L1); + +#endif + diff --git a/deps/lua/src/lstring.c b/deps/lua/src/lstring.c new file mode 100644 index 0000000000000000000000000000000000000000..4319930c96d07e61fe98b697b87c538280cf38c9 --- /dev/null +++ b/deps/lua/src/lstring.c @@ -0,0 +1,111 @@ +/* +** $Id: lstring.c,v 2.8 2005/12/22 16:19:56 roberto Exp $ +** String table (keeps all strings handled by Lua) +** See Copyright Notice in lua.h +*/ + + +#include + +#define lstring_c +#define LUA_CORE + +#include "lua.h" + +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" + + + +void luaS_resize (lua_State *L, int newsize) { + GCObject **newhash; + stringtable *tb; + int i; + if (G(L)->gcstate == GCSsweepstring) + return; /* cannot resize during GC traverse */ + newhash = luaM_newvector(L, newsize, GCObject *); + tb = &G(L)->strt; + for (i=0; isize; i++) { + GCObject *p = tb->hash[i]; + while (p) { /* for each node in the list */ + GCObject *next = p->gch.next; /* save next */ + unsigned int h = gco2ts(p)->hash; + int h1 = lmod(h, newsize); /* new position */ + lua_assert(cast_int(h%newsize) == lmod(h, newsize)); + p->gch.next = newhash[h1]; /* chain it */ + newhash[h1] = p; + p = next; + } + } + luaM_freearray(L, tb->hash, tb->size, TString *); + tb->size = newsize; + tb->hash = newhash; +} + + +static TString *newlstr (lua_State *L, const char *str, size_t l, + unsigned int h) { + TString *ts; + stringtable *tb; + if (l+1 > (MAX_SIZET - sizeof(TString))/sizeof(char)) + luaM_toobig(L); + ts = cast(TString *, luaM_malloc(L, (l+1)*sizeof(char)+sizeof(TString))); + ts->tsv.len = l; + ts->tsv.hash = h; + ts->tsv.marked = luaC_white(G(L)); + ts->tsv.tt = LUA_TSTRING; + ts->tsv.reserved = 0; + memcpy(ts+1, str, l*sizeof(char)); + ((char *)(ts+1))[l] = '\0'; /* ending 0 */ + tb = &G(L)->strt; + h = lmod(h, tb->size); + ts->tsv.next = tb->hash[h]; /* chain new entry */ + tb->hash[h] = obj2gco(ts); + tb->nuse++; + if (tb->nuse > cast(lu_int32, tb->size) && tb->size <= MAX_INT/2) + luaS_resize(L, tb->size*2); /* too crowded */ + return ts; +} + + +TString *luaS_newlstr (lua_State *L, const 
char *str, size_t l) { + GCObject *o; + unsigned int h = cast(unsigned int, l); /* seed */ + size_t step = (l>>5)+1; /* if string is too long, don't hash all its chars */ + size_t l1; + for (l1=l; l1>=step; l1-=step) /* compute hash */ + h = h ^ ((h<<5)+(h>>2)+cast(unsigned char, str[l1-1])); + for (o = G(L)->strt.hash[lmod(h, G(L)->strt.size)]; + o != NULL; + o = o->gch.next) { + TString *ts = rawgco2ts(o); + if (ts->tsv.len == l && (memcmp(str, getstr(ts), l) == 0)) { + /* string may be dead */ + if (isdead(G(L), o)) changewhite(o); + return ts; + } + } + return newlstr(L, str, l, h); /* not found */ +} + + +Udata *luaS_newudata (lua_State *L, size_t s, Table *e) { + Udata *u; + if (s > MAX_SIZET - sizeof(Udata)) + luaM_toobig(L); + u = cast(Udata *, luaM_malloc(L, s + sizeof(Udata))); + u->uv.marked = luaC_white(G(L)); /* is not finalized */ + u->uv.tt = LUA_TUSERDATA; + u->uv.len = s; + u->uv.metatable = NULL; + u->uv.env = e; + /* chain it on udata list (after main thread) */ + u->uv.next = G(L)->mainthread->next; + G(L)->mainthread->next = obj2gco(u); + return u; +} + diff --git a/deps/lua/src/lstring.h b/deps/lua/src/lstring.h new file mode 100644 index 0000000000000000000000000000000000000000..1d2e91ea13a31bcfbdbee4c63dc17d1e99e9f8e4 --- /dev/null +++ b/deps/lua/src/lstring.h @@ -0,0 +1,31 @@ +/* +** $Id: lstring.h,v 1.43 2005/04/25 19:24:10 roberto Exp $ +** String table (keep all strings handled by Lua) +** See Copyright Notice in lua.h +*/ + +#ifndef lstring_h +#define lstring_h + + +#include "lgc.h" +#include "lobject.h" +#include "lstate.h" + + +#define sizestring(s) (sizeof(union TString)+((s)->len+1)*sizeof(char)) + +#define sizeudata(u) (sizeof(union Udata)+(u)->len) + +#define luaS_new(L, s) (luaS_newlstr(L, s, strlen(s))) +#define luaS_newliteral(L, s) (luaS_newlstr(L, "" s, \ + (sizeof(s)/sizeof(char))-1)) + +#define luaS_fix(s) l_setbit((s)->tsv.marked, FIXEDBIT) + +LUAI_FUNC void luaS_resize (lua_State *L, int newsize); +LUAI_FUNC Udata *luaS_newudata (lua_State *L, size_t s, Table *e); +LUAI_FUNC TString *luaS_newlstr (lua_State *L, const char *str, size_t l); + + +#endif diff --git a/deps/lua/src/lstrlib.c b/deps/lua/src/lstrlib.c new file mode 100644 index 0000000000000000000000000000000000000000..84478fd106c6e96203d68db4e642302d1fba745a --- /dev/null +++ b/deps/lua/src/lstrlib.c @@ -0,0 +1,863 @@ +/* +** $Id: lstrlib.c,v 1.130 2005/12/29 15:32:11 roberto Exp $ +** Standard library for string operations and pattern-matching +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include +#include + +#define lstrlib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +/* macro to `unsign' a character */ +#define uchar(c) ((unsigned char)(c)) + + + +static int str_len (lua_State *L) { + size_t l; + luaL_checklstring(L, 1, &l); + lua_pushinteger(L, l); + return 1; +} + + +static ptrdiff_t posrelat (ptrdiff_t pos, size_t len) { + /* relative string position: negative means back from end */ + return (pos>=0) ? 
pos : (ptrdiff_t)len+pos+1; +} + + +static int str_sub (lua_State *L) { + size_t l; + const char *s = luaL_checklstring(L, 1, &l); + ptrdiff_t start = posrelat(luaL_checkinteger(L, 2), l); + ptrdiff_t end = posrelat(luaL_optinteger(L, 3, -1), l); + if (start < 1) start = 1; + if (end > (ptrdiff_t)l) end = (ptrdiff_t)l; + if (start <= end) + lua_pushlstring(L, s+start-1, end-start+1); + else lua_pushliteral(L, ""); + return 1; +} + + +static int str_reverse (lua_State *L) { + size_t l; + luaL_Buffer b; + const char *s = luaL_checklstring(L, 1, &l); + luaL_buffinit(L, &b); + while (l--) luaL_addchar(&b, s[l]); + luaL_pushresult(&b); + return 1; +} + + +static int str_lower (lua_State *L) { + size_t l; + size_t i; + luaL_Buffer b; + const char *s = luaL_checklstring(L, 1, &l); + luaL_buffinit(L, &b); + for (i=0; i 0) + luaL_addlstring(&b, s, l); + luaL_pushresult(&b); + return 1; +} + + +static int str_byte (lua_State *L) { + size_t l; + const char *s = luaL_checklstring(L, 1, &l); + ptrdiff_t posi = posrelat(luaL_optinteger(L, 2, 1), l); + ptrdiff_t pose = posrelat(luaL_optinteger(L, 3, posi), l); + int n, i; + if (posi <= 0) posi = 1; + if ((size_t)pose > l) pose = l; + if (posi > pose) return 0; /* empty interval; return no values */ + n = (int)(pose - posi + 1); + if (posi + n <= pose) /* overflow? */ + luaL_error(L, "string slice too long"); + luaL_checkstack(L, n, "string slice too long"); + for (i=0; i= ms->level || ms->capture[l].len == CAP_UNFINISHED) + return luaL_error(ms->L, "invalid capture index"); + return l; +} + + +static int capture_to_close (MatchState *ms) { + int level = ms->level; + for (level--; level>=0; level--) + if (ms->capture[level].len == CAP_UNFINISHED) return level; + return luaL_error(ms->L, "invalid pattern capture"); +} + + +static const char *classend (MatchState *ms, const char *p) { + switch (*p++) { + case L_ESC: { + if (*p == '\0') + luaL_error(ms->L, "malformed pattern (ends with " LUA_QL("%%") ")"); + return p+1; + } + case '[': { + if (*p == '^') p++; + do { /* look for a `]' */ + if (*p == '\0') + luaL_error(ms->L, "malformed pattern (missing " LUA_QL("]") ")"); + if (*(p++) == L_ESC && *p != '\0') + p++; /* skip escapes (e.g. `%]') */ + } while (*p != ']'); + return p+1; + } + default: { + return p; + } + } +} + + +static int match_class (int c, int cl) { + int res; + switch (tolower(cl)) { + case 'a' : res = isalpha(c); break; + case 'c' : res = iscntrl(c); break; + case 'd' : res = isdigit(c); break; + case 'l' : res = islower(c); break; + case 'p' : res = ispunct(c); break; + case 's' : res = isspace(c); break; + case 'u' : res = isupper(c); break; + case 'w' : res = isalnum(c); break; + case 'x' : res = isxdigit(c); break; + case 'z' : res = (c == 0); break; + default: return (cl == c); + } + return (islower(cl) ? 
res : !res); +} + + +static int matchbracketclass (int c, const char *p, const char *ec) { + int sig = 1; + if (*(p+1) == '^') { + sig = 0; + p++; /* skip the `^' */ + } + while (++p < ec) { + if (*p == L_ESC) { + p++; + if (match_class(c, uchar(*p))) + return sig; + } + else if ((*(p+1) == '-') && (p+2 < ec)) { + p+=2; + if (uchar(*(p-2)) <= c && c <= uchar(*p)) + return sig; + } + else if (uchar(*p) == c) return sig; + } + return !sig; +} + + +static int singlematch (int c, const char *p, const char *ep) { + switch (*p) { + case '.': return 1; /* matches any char */ + case L_ESC: return match_class(c, uchar(*(p+1))); + case '[': return matchbracketclass(c, p, ep-1); + default: return (uchar(*p) == c); + } +} + + +static const char *match (MatchState *ms, const char *s, const char *p); + + +static const char *matchbalance (MatchState *ms, const char *s, + const char *p) { + if (*p == 0 || *(p+1) == 0) + luaL_error(ms->L, "unbalanced pattern"); + if (*s != *p) return NULL; + else { + int b = *p; + int e = *(p+1); + int cont = 1; + while (++s < ms->src_end) { + if (*s == e) { + if (--cont == 0) return s+1; + } + else if (*s == b) cont++; + } + } + return NULL; /* string ends out of balance */ +} + + +static const char *max_expand (MatchState *ms, const char *s, + const char *p, const char *ep) { + ptrdiff_t i = 0; /* counts maximum expand for item */ + while ((s+i)src_end && singlematch(uchar(*(s+i)), p, ep)) + i++; + /* keeps trying to match with the maximum repetitions */ + while (i>=0) { + const char *res = match(ms, (s+i), ep+1); + if (res) return res; + i--; /* else didn't match; reduce 1 repetition to try again */ + } + return NULL; +} + + +static const char *min_expand (MatchState *ms, const char *s, + const char *p, const char *ep) { + for (;;) { + const char *res = match(ms, s, ep+1); + if (res != NULL) + return res; + else if (ssrc_end && singlematch(uchar(*s), p, ep)) + s++; /* try with one more repetition */ + else return NULL; + } +} + + +static const char *start_capture (MatchState *ms, const char *s, + const char *p, int what) { + const char *res; + int level = ms->level; + if (level >= LUA_MAXCAPTURES) luaL_error(ms->L, "too many captures"); + ms->capture[level].init = s; + ms->capture[level].len = what; + ms->level = level+1; + if ((res=match(ms, s, p)) == NULL) /* match failed? */ + ms->level--; /* undo capture */ + return res; +} + + +static const char *end_capture (MatchState *ms, const char *s, + const char *p) { + int l = capture_to_close(ms); + const char *res; + ms->capture[l].len = s - ms->capture[l].init; /* close capture */ + if ((res = match(ms, s, p)) == NULL) /* match failed? */ + ms->capture[l].len = CAP_UNFINISHED; /* undo capture */ + return res; +} + + +static const char *match_capture (MatchState *ms, const char *s, int l) { + size_t len; + l = check_capture(ms, l); + len = ms->capture[l].len; + if ((size_t)(ms->src_end-s) >= len && + memcmp(ms->capture[l].init, s, len) == 0) + return s+len; + else return NULL; +} + + +static const char *match (MatchState *ms, const char *s, const char *p) { + init: /* using goto's to optimize tail recursion */ + switch (*p) { + case '(': { /* start capture */ + if (*(p+1) == ')') /* position capture? */ + return start_capture(ms, s, p+2, CAP_POSITION); + else + return start_capture(ms, s, p+1, CAP_UNFINISHED); + } + case ')': { /* end capture */ + return end_capture(ms, s, p+1); + } + case L_ESC: { + switch (*(p+1)) { + case 'b': { /* balanced string? 
*/ + s = matchbalance(ms, s, p+2); + if (s == NULL) return NULL; + p+=4; goto init; /* else return match(ms, s, p+4); */ + } + case 'f': { /* frontier? */ + const char *ep; char previous; + p += 2; + if (*p != '[') + luaL_error(ms->L, "missing " LUA_QL("[") " after " + LUA_QL("%%f") " in pattern"); + ep = classend(ms, p); /* points to what is next */ + previous = (s == ms->src_init) ? '\0' : *(s-1); + if (matchbracketclass(uchar(previous), p, ep-1) || + !matchbracketclass(uchar(*s), p, ep-1)) return NULL; + p=ep; goto init; /* else return match(ms, s, ep); */ + } + default: { + if (isdigit(uchar(*(p+1)))) { /* capture results (%0-%9)? */ + s = match_capture(ms, s, uchar(*(p+1))); + if (s == NULL) return NULL; + p+=2; goto init; /* else return match(ms, s, p+2) */ + } + goto dflt; /* case default */ + } + } + } + case '\0': { /* end of pattern */ + return s; /* match succeeded */ + } + case '$': { + if (*(p+1) == '\0') /* is the `$' the last char in pattern? */ + return (s == ms->src_end) ? s : NULL; /* check end of string */ + else goto dflt; + } + default: dflt: { /* it is a pattern item */ + const char *ep = classend(ms, p); /* points to what is next */ + int m = ssrc_end && singlematch(uchar(*s), p, ep); + switch (*ep) { + case '?': { /* optional */ + const char *res; + if (m && ((res=match(ms, s+1, ep+1)) != NULL)) + return res; + p=ep+1; goto init; /* else return match(ms, s, ep+1); */ + } + case '*': { /* 0 or more repetitions */ + return max_expand(ms, s, p, ep); + } + case '+': { /* 1 or more repetitions */ + return (m ? max_expand(ms, s+1, p, ep) : NULL); + } + case '-': { /* 0 or more repetitions (minimum) */ + return min_expand(ms, s, p, ep); + } + default: { + if (!m) return NULL; + s++; p=ep; goto init; /* else return match(ms, s+1, ep); */ + } + } + } + } +} + + + +static const char *lmemfind (const char *s1, size_t l1, + const char *s2, size_t l2) { + if (l2 == 0) return s1; /* empty strings are everywhere */ + else if (l2 > l1) return NULL; /* avoids a negative `l1' */ + else { + const char *init; /* to search for a `*s2' inside `s1' */ + l2--; /* 1st char will be checked by `memchr' */ + l1 = l1-l2; /* `s2' cannot be found after that */ + while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) { + init++; /* 1st char is already checked */ + if (memcmp(init, s2+1, l2) == 0) + return init-1; + else { /* correct `l1' and `s1' to try again */ + l1 -= init-s1; + s1 = init; + } + } + return NULL; /* not found */ + } +} + + +static void push_onecapture (MatchState *ms, int i, const char *s, + const char *e) { + if (i >= ms->level) { + if (i == 0) /* ms->level == 0, too */ + lua_pushlstring(ms->L, s, e - s); /* add whole match */ + else + luaL_error(ms->L, "invalid capture index"); + } + else { + ptrdiff_t l = ms->capture[i].len; + if (l == CAP_UNFINISHED) luaL_error(ms->L, "unfinished capture"); + if (l == CAP_POSITION) + lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1); + else + lua_pushlstring(ms->L, ms->capture[i].init, l); + } +} + + +static int push_captures (MatchState *ms, const char *s, const char *e) { + int i; + int nlevels = (ms->level == 0 && s) ? 
1 : ms->level; + luaL_checkstack(ms->L, nlevels, "too many captures"); + for (i = 0; i < nlevels; i++) + push_onecapture(ms, i, s, e); + return nlevels; /* number of strings pushed */ +} + + +static int str_find_aux (lua_State *L, int find) { + size_t l1, l2; + const char *s = luaL_checklstring(L, 1, &l1); + const char *p = luaL_checklstring(L, 2, &l2); + ptrdiff_t init = posrelat(luaL_optinteger(L, 3, 1), l1) - 1; + if (init < 0) init = 0; + else if ((size_t)(init) > l1) init = (ptrdiff_t)l1; + if (find && (lua_toboolean(L, 4) || /* explicit request? */ + strpbrk(p, SPECIALS) == NULL)) { /* or no special characters? */ + /* do a plain search */ + const char *s2 = lmemfind(s+init, l1-init, p, l2); + if (s2) { + lua_pushinteger(L, s2-s+1); + lua_pushinteger(L, s2-s+l2); + return 2; + } + } + else { + MatchState ms; + int anchor = (*p == '^') ? (p++, 1) : 0; + const char *s1=s+init; + ms.L = L; + ms.src_init = s; + ms.src_end = s+l1; + do { + const char *res; + ms.level = 0; + if ((res=match(&ms, s1, p)) != NULL) { + if (find) { + lua_pushinteger(L, s1-s+1); /* start */ + lua_pushinteger(L, res-s); /* end */ + return push_captures(&ms, NULL, 0) + 2; + } + else + return push_captures(&ms, s1, res); + } + } while (s1++ < ms.src_end && !anchor); + } + lua_pushnil(L); /* not found */ + return 1; +} + + +static int str_find (lua_State *L) { + return str_find_aux(L, 1); +} + + +static int str_match (lua_State *L) { + return str_find_aux(L, 0); +} + + +static int gmatch_aux (lua_State *L) { + MatchState ms; + size_t ls; + const char *s = lua_tolstring(L, lua_upvalueindex(1), &ls); + const char *p = lua_tostring(L, lua_upvalueindex(2)); + const char *src; + ms.L = L; + ms.src_init = s; + ms.src_end = s+ls; + for (src = s + (size_t)lua_tointeger(L, lua_upvalueindex(3)); + src <= ms.src_end; + src++) { + const char *e; + ms.level = 0; + if ((e = match(&ms, src, p)) != NULL) { + lua_Integer newstart = e-s; + if (e == src) newstart++; /* empty match? 
go at least one position */ + lua_pushinteger(L, newstart); + lua_replace(L, lua_upvalueindex(3)); + return push_captures(&ms, src, e); + } + } + return 0; /* not found */ +} + + +static int gmatch (lua_State *L) { + luaL_checkstring(L, 1); + luaL_checkstring(L, 2); + lua_settop(L, 2); + lua_pushinteger(L, 0); + lua_pushcclosure(L, gmatch_aux, 3); + return 1; +} + + +static int gfind_nodef (lua_State *L) { + return luaL_error(L, LUA_QL("string.gfind") " was renamed to " + LUA_QL("string.gmatch")); +} + + +static void add_s (MatchState *ms, luaL_Buffer *b, const char *s, + const char *e) { + size_t l, i; + const char *news = lua_tolstring(ms->L, 3, &l); + for (i = 0; i < l; i++) { + if (news[i] != L_ESC) + luaL_addchar(b, news[i]); + else { + i++; /* skip ESC */ + if (!isdigit(uchar(news[i]))) + luaL_addchar(b, news[i]); + else if (news[i] == '0') + luaL_addlstring(b, s, e - s); + else { + push_onecapture(ms, news[i] - '1', s, e); + luaL_addvalue(b); /* add capture to accumulated result */ + } + } + } +} + + +static void add_value (MatchState *ms, luaL_Buffer *b, const char *s, + const char *e) { + lua_State *L = ms->L; + switch (lua_type(L, 3)) { + case LUA_TNUMBER: + case LUA_TSTRING: { + add_s(ms, b, s, e); + return; + } + case LUA_TFUNCTION: { + int n; + lua_pushvalue(L, 3); + n = push_captures(ms, s, e); + lua_call(L, n, 1); + break; + } + case LUA_TTABLE: { + push_onecapture(ms, 0, s, e); + lua_gettable(L, 3); + break; + } + default: { + luaL_argerror(L, 3, "string/function/table expected"); + return; + } + } + if (!lua_toboolean(L, -1)) { /* nil or false? */ + lua_pop(L, 1); + lua_pushlstring(L, s, e - s); /* keep original text */ + } + else if (!lua_isstring(L, -1)) + luaL_error(L, "invalid replacement value (a %s)", luaL_typename(L, -1)); + luaL_addvalue(b); /* add result to accumulator */ +} + + +static int str_gsub (lua_State *L) { + size_t srcl; + const char *src = luaL_checklstring(L, 1, &srcl); + const char *p = luaL_checkstring(L, 2); + int max_s = luaL_optint(L, 4, srcl+1); + int anchor = (*p == '^') ? (p++, 1) : 0; + int n = 0; + MatchState ms; + luaL_Buffer b; + luaL_buffinit(L, &b); + ms.L = L; + ms.src_init = src; + ms.src_end = src+srcl; + while (n < max_s) { + const char *e; + ms.level = 0; + e = match(&ms, src, p); + if (e) { + n++; + add_value(&ms, &b, src, e); + } + if (e && e>src) /* non empty match? 
*/ + src = e; /* skip it */ + else if (src < ms.src_end) + luaL_addchar(&b, *src++); + else break; + if (anchor) break; + } + luaL_addlstring(&b, src, ms.src_end-src); + luaL_pushresult(&b); + lua_pushinteger(L, n); /* number of substitutions */ + return 2; +} + +/* }====================================================== */ + + +/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */ +#define MAX_ITEM 512 +/* valid flags in a format specification */ +#define FLAGS "-+ #0" +/* +** maximum size of each format specification (such as '%-099.99d') +** (+10 accounts for %99.99x plus margin of error) +*/ +#define MAX_FORMAT (sizeof(FLAGS) + sizeof(LUA_INTFRMLEN) + 10) + + +static void addquoted (lua_State *L, luaL_Buffer *b, int arg) { + size_t l; + const char *s = luaL_checklstring(L, arg, &l); + luaL_addchar(b, '"'); + while (l--) { + switch (*s) { + case '"': case '\\': case '\n': { + luaL_addchar(b, '\\'); + luaL_addchar(b, *s); + break; + } + case '\0': { + luaL_addlstring(b, "\\000", 4); + break; + } + default: { + luaL_addchar(b, *s); + break; + } + } + s++; + } + luaL_addchar(b, '"'); +} + +static const char *scanformat (lua_State *L, const char *strfrmt, char *form) { + const char *p = strfrmt; + while (strchr(FLAGS, *p)) p++; /* skip flags */ + if ((size_t)(p - strfrmt) >= sizeof(FLAGS)) + luaL_error(L, "invalid format (repeated flags)"); + if (isdigit(uchar(*p))) p++; /* skip width */ + if (isdigit(uchar(*p))) p++; /* (2 digits at most) */ + if (*p == '.') { + p++; + if (isdigit(uchar(*p))) p++; /* skip precision */ + if (isdigit(uchar(*p))) p++; /* (2 digits at most) */ + } + if (isdigit(uchar(*p))) + luaL_error(L, "invalid format (width or precision too long)"); + *(form++) = '%'; + strncpy(form, strfrmt, p - strfrmt + 1); + form += p - strfrmt + 1; + *form = '\0'; + return p; +} + + +static void addintlen (char *form) { + size_t l = strlen(form); + char spec = form[l - 1]; + strcpy(form + l - 1, LUA_INTFRMLEN); + form[l + sizeof(LUA_INTFRMLEN) - 2] = spec; + form[l + sizeof(LUA_INTFRMLEN) - 1] = '\0'; +} + + +static int str_format (lua_State *L) { + int arg = 1; + size_t sfl; + const char *strfrmt = luaL_checklstring(L, arg, &sfl); + const char *strfrmt_end = strfrmt+sfl; + luaL_Buffer b; + luaL_buffinit(L, &b); + while (strfrmt < strfrmt_end) { + if (*strfrmt != L_ESC) + luaL_addchar(&b, *strfrmt++); + else if (*++strfrmt == L_ESC) + luaL_addchar(&b, *strfrmt++); /* %% */ + else { /* format item */ + char form[MAX_FORMAT]; /* to store the format (`%...') */ + char buff[MAX_ITEM]; /* to store the formatted item */ + arg++; + strfrmt = scanformat(L, strfrmt, form); + switch (*strfrmt++) { + case 'c': { + sprintf(buff, form, (int)luaL_checknumber(L, arg)); + break; + } + case 'd': case 'i': { + addintlen(form); + sprintf(buff, form, (LUA_INTFRM_T)luaL_checknumber(L, arg)); + break; + } + case 'o': case 'u': case 'x': case 'X': { + addintlen(form); + sprintf(buff, form, (unsigned LUA_INTFRM_T)luaL_checknumber(L, arg)); + break; + } + case 'e': case 'E': case 'f': + case 'g': case 'G': { + sprintf(buff, form, (double)luaL_checknumber(L, arg)); + break; + } + case 'q': { + addquoted(L, &b, arg); + continue; /* skip the 'addsize' at the end */ + } + case 's': { + size_t l; + const char *s = luaL_checklstring(L, arg, &l); + if (!strchr(form, '.') && l >= 100) { + /* no precision and string is too long to be formatted; + keep original string */ + lua_pushvalue(L, arg); + luaL_addvalue(&b); + continue; /* skip the `addsize' at the end */ + } + else { + sprintf(buff, 
form, s); + break; + } + } + default: { /* also treat cases `pnLlh' */ + return luaL_error(L, "invalid option to " LUA_QL("format")); + } + } + luaL_addlstring(&b, buff, strlen(buff)); + } + } + luaL_pushresult(&b); + return 1; +} + + +static const luaL_Reg strlib[] = { + {"byte", str_byte}, + {"char", str_char}, + {"dump", str_dump}, + {"find", str_find}, + {"format", str_format}, + {"gfind", gfind_nodef}, + {"gmatch", gmatch}, + {"gsub", str_gsub}, + {"len", str_len}, + {"lower", str_lower}, + {"match", str_match}, + {"rep", str_rep}, + {"reverse", str_reverse}, + {"sub", str_sub}, + {"upper", str_upper}, + {NULL, NULL} +}; + + +static void createmetatable (lua_State *L) { + lua_createtable(L, 0, 1); /* create metatable for strings */ + lua_pushliteral(L, ""); /* dummy string */ + lua_pushvalue(L, -2); + lua_setmetatable(L, -2); /* set string metatable */ + lua_pop(L, 1); /* pop dummy string */ + lua_pushvalue(L, -2); /* string library... */ + lua_setfield(L, -2, "__index"); /* ...is the __index metamethod */ + lua_pop(L, 1); /* pop metatable */ +} + + +/* +** Open string library +*/ +LUALIB_API int luaopen_string (lua_State *L) { + luaL_register(L, LUA_STRLIBNAME, strlib); +#if defined(LUA_COMPAT_GFIND) + lua_getfield(L, -1, "gmatch"); + lua_setfield(L, -2, "gfind"); +#endif + createmetatable(L); + return 1; +} + diff --git a/deps/lua/src/ltable.c b/deps/lua/src/ltable.c new file mode 100644 index 0000000000000000000000000000000000000000..bc91cacd637fe4563637f8c02740dc57a9cf50de --- /dev/null +++ b/deps/lua/src/ltable.c @@ -0,0 +1,588 @@ +/* +** $Id: ltable.c,v 2.32 2006/01/18 11:49:02 roberto Exp $ +** Lua tables (hash) +** See Copyright Notice in lua.h +*/ + + +/* +** Implementation of tables (aka arrays, objects, or hash tables). +** Tables keep its elements in two parts: an array part and a hash part. +** Non-negative integer keys are all candidates to be kept in the array +** part. The actual size of the array is the largest `n' such that at +** least half the slots between 0 and n are in use. +** Hash uses a mix of chained scatter table with Brent's variation. +** A main invariant of these tables is that, if an element is not +** in its main position (i.e. the `original' position that its hash gives +** to it), then the colliding element is in its own main position. +** Hence even when the load factor reaches 100%, performance remains good. +*/ + +#include +#include + +#define ltable_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lgc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstate.h" +#include "ltable.h" + + +/* +** max size of array part is 2^MAXBITS +*/ +#if LUAI_BITSINT > 26 +#define MAXBITS 26 +#else +#define MAXBITS (LUAI_BITSINT-2) +#endif + +#define MAXASIZE (1 << MAXBITS) + + +#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t)))) + +#define hashstr(t,str) hashpow2(t, (str)->tsv.hash) +#define hashboolean(t,p) hashpow2(t, p) + + +/* +** for some types, it is better to avoid modulus by power of 2, as +** they tend to have many 2 factors. 
+*/ +#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1)))) + + +#define hashpointer(t,p) hashmod(t, IntPoint(p)) + + +/* +** number of ints inside a lua_Number +*/ +#define numints cast_int(sizeof(lua_Number)/sizeof(int)) + + + +#define dummynode (&dummynode_) + +static const Node dummynode_ = { + {{NULL}, LUA_TNIL}, /* value */ + {{{NULL}, LUA_TNIL, NULL}} /* key */ +}; + + +/* +** hash for lua_Numbers +*/ +static Node *hashnum (const Table *t, lua_Number n) { + unsigned int a[numints]; + int i; + n += 1; /* normalize number (avoid -0) */ + lua_assert(sizeof(a) <= sizeof(n)); + memcpy(a, &n, sizeof(a)); + for (i = 1; i < numints; i++) a[0] += a[i]; + return hashmod(t, a[0]); +} + + + +/* +** returns the `main' position of an element in a table (that is, the index +** of its hash value) +*/ +static Node *mainposition (const Table *t, const TValue *key) { + switch (ttype(key)) { + case LUA_TNUMBER: + return hashnum(t, nvalue(key)); + case LUA_TSTRING: + return hashstr(t, rawtsvalue(key)); + case LUA_TBOOLEAN: + return hashboolean(t, bvalue(key)); + case LUA_TLIGHTUSERDATA: + return hashpointer(t, pvalue(key)); + default: + return hashpointer(t, gcvalue(key)); + } +} + + +/* +** returns the index for `key' if `key' is an appropriate key to live in +** the array part of the table, -1 otherwise. +*/ +static int arrayindex (const TValue *key) { + if (ttisnumber(key)) { + lua_Number n = nvalue(key); + int k; + lua_number2int(k, n); + if (luai_numeq(cast_num(k), n)) + return k; + } + return -1; /* `key' did not match some condition */ +} + + +/* +** returns the index of a `key' for table traversals. First goes all +** elements in the array part, then elements in the hash part. The +** beginning of a traversal is signalled by -1. +*/ +static int findindex (lua_State *L, Table *t, StkId key) { + int i; + if (ttisnil(key)) return -1; /* first iteration */ + i = arrayindex(key); + if (0 < i && i <= t->sizearray) /* is `key' inside array part? */ + return i-1; /* yes; that's the index (corrected to C) */ + else { + Node *n = mainposition(t, key); + do { /* check whether `key' is somewhere in the chain */ + /* key may be dead already, but it is ok to use it in `next' */ + if (luaO_rawequalObj(key2tval(n), key) || + (ttype(gkey(n)) == LUA_TDEADKEY && iscollectable(key) && + gcvalue(gkey(n)) == gcvalue(key))) { + i = cast_int(n - gnode(t, 0)); /* key index in hash table */ + /* hash elements are numbered after array ones */ + return i + t->sizearray; + } + else n = gnext(n); + } while (n); + luaG_runerror(L, "invalid key to " LUA_QL("next")); /* key not found */ + return 0; /* to avoid warnings */ + } +} + + +int luaH_next (lua_State *L, Table *t, StkId key) { + int i = findindex(L, t, key); /* find original element */ + for (i++; i < t->sizearray; i++) { /* try first array part */ + if (!ttisnil(&t->array[i])) { /* a non-nil value? */ + setnvalue(key, cast_num(i+1)); + setobj2s(L, key+1, &t->array[i]); + return 1; + } + } + for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */ + if (!ttisnil(gval(gnode(t, i)))) { /* a non-nil value? 
*/ + setobj2s(L, key, key2tval(gnode(t, i))); + setobj2s(L, key+1, gval(gnode(t, i))); + return 1; + } + } + return 0; /* no more elements */ +} + + +/* +** {============================================================= +** Rehash +** ============================================================== +*/ + + +static int computesizes (int nums[], int *narray) { + int i; + int twotoi; /* 2^i */ + int a = 0; /* number of elements smaller than 2^i */ + int na = 0; /* number of elements to go to array part */ + int n = 0; /* optimal size for array part */ + for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) { + if (nums[i] > 0) { + a += nums[i]; + if (a > twotoi/2) { /* more than half elements present? */ + n = twotoi; /* optimal size (till now) */ + na = a; /* all elements smaller than n will go to array part */ + } + } + if (a == *narray) break; /* all elements already counted */ + } + *narray = n; + lua_assert(*narray/2 <= na && na <= *narray); + return na; +} + + +static int countint (const TValue *key, int *nums) { + int k = arrayindex(key); + if (0 < k && k <= MAXASIZE) { /* is `key' an appropriate array index? */ + nums[ceillog2(k)]++; /* count as such */ + return 1; + } + else + return 0; +} + + +static int numusearray (const Table *t, int *nums) { + int lg; + int ttlg; /* 2^lg */ + int ause = 0; /* summation of `nums' */ + int i = 1; /* count to traverse all array keys */ + for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) { /* for each slice */ + int lc = 0; /* counter */ + int lim = ttlg; + if (lim > t->sizearray) { + lim = t->sizearray; /* adjust upper limit */ + if (i > lim) + break; /* no more elements to count */ + } + /* count elements in range (2^(lg-1), 2^lg] */ + for (; i <= lim; i++) { + if (!ttisnil(&t->array[i-1])) + lc++; + } + nums[lg] += lc; + ause += lc; + } + return ause; +} + + +static int numusehash (const Table *t, int *nums, int *pnasize) { + int totaluse = 0; /* total number of elements */ + int ause = 0; /* summation of `nums' */ + int i = sizenode(t); + while (i--) { + Node *n = &t->node[i]; + if (!ttisnil(gval(n))) { + ause += countint(key2tval(n), nums); + totaluse++; + } + } + *pnasize += ause; + return totaluse; +} + + +static void setarrayvector (lua_State *L, Table *t, int size) { + int i; + luaM_reallocvector(L, t->array, t->sizearray, size, TValue); + for (i=t->sizearray; iarray[i]); + t->sizearray = size; +} + + +static void setnodevector (lua_State *L, Table *t, int size) { + int lsize; + if (size == 0) { /* no elements to hash part? */ + t->node = cast(Node *, dummynode); /* use common `dummynode' */ + lsize = 0; + } + else { + int i; + lsize = ceillog2(size); + if (lsize > MAXBITS) + luaG_runerror(L, "table overflow"); + size = twoto(lsize); + t->node = luaM_newvector(L, size, Node); + for (i=0; ilsizenode = cast_byte(lsize); + t->lastfree = gnode(t, size); /* all positions are free */ +} + + +static void resize (lua_State *L, Table *t, int nasize, int nhsize) { + int i; + int oldasize = t->sizearray; + int oldhsize = t->lsizenode; + Node *nold = t->node; /* save old hash ... */ + if (nasize > oldasize) /* array part must grow? */ + setarrayvector(L, t, nasize); + /* create new hash part with appropriate size */ + setnodevector(L, t, nhsize); + if (nasize < oldasize) { /* array part must shrink? 
*/ + t->sizearray = nasize; + /* re-insert elements from vanishing slice */ + for (i=nasize; iarray[i])) + setobjt2t(L, luaH_setnum(L, t, i+1), &t->array[i]); + } + /* shrink array */ + luaM_reallocvector(L, t->array, oldasize, nasize, TValue); + } + /* re-insert elements from hash part */ + for (i = twoto(oldhsize) - 1; i >= 0; i--) { + Node *old = nold+i; + if (!ttisnil(gval(old))) + setobjt2t(L, luaH_set(L, t, key2tval(old)), gval(old)); + } + if (nold != dummynode) + luaM_freearray(L, nold, twoto(oldhsize), Node); /* free old array */ +} + + +void luaH_resizearray (lua_State *L, Table *t, int nasize) { + int nsize = (t->node == dummynode) ? 0 : sizenode(t); + resize(L, t, nasize, nsize); +} + + +static void rehash (lua_State *L, Table *t, const TValue *ek) { + int nasize, na; + int nums[MAXBITS+1]; /* nums[i] = number of keys between 2^(i-1) and 2^i */ + int i; + int totaluse; + for (i=0; i<=MAXBITS; i++) nums[i] = 0; /* reset counts */ + nasize = numusearray(t, nums); /* count keys in array part */ + totaluse = nasize; /* all those keys are integer keys */ + totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */ + /* count extra key */ + nasize += countint(ek, nums); + totaluse++; + /* compute new size for array part */ + na = computesizes(nums, &nasize); + /* resize the table to new computed sizes */ + resize(L, t, nasize, totaluse - na); +} + + + +/* +** }============================================================= +*/ + + +Table *luaH_new (lua_State *L, int narray, int nhash) { + Table *t = luaM_new(L, Table); + luaC_link(L, obj2gco(t), LUA_TTABLE); + t->metatable = NULL; + t->flags = cast_byte(~0); + /* temporary values (kept only if some malloc fails) */ + t->array = NULL; + t->sizearray = 0; + t->lsizenode = 0; + t->node = cast(Node *, dummynode); + setarrayvector(L, t, narray); + setnodevector(L, t, nhash); + return t; +} + + +void luaH_free (lua_State *L, Table *t) { + if (t->node != dummynode) + luaM_freearray(L, t->node, sizenode(t), Node); + luaM_freearray(L, t->array, t->sizearray, TValue); + luaM_free(L, t); +} + + +static Node *getfreepos (Table *t) { + while (t->lastfree-- > t->node) { + if (ttisnil(gkey(t->lastfree))) + return t->lastfree; + } + return NULL; /* could not find a free place */ +} + + + +/* +** inserts a new key into a hash table; first, check whether key's main +** position is free. If not, check whether colliding node is in its main +** position or not: if it is not, move colliding node to an empty place and +** put new key in its main position; otherwise (colliding node is in its main +** position), new key goes to an empty position. +*/ +static TValue *newkey (lua_State *L, Table *t, const TValue *key) { + Node *mp = mainposition(t, key); + if (!ttisnil(gval(mp)) || mp == dummynode) { + Node *othern; + Node *n = getfreepos(t); /* get a free place */ + if (n == NULL) { /* cannot find a free place? */ + rehash(L, t, key); /* grow table */ + return luaH_set(L, t, key); /* re-insert key into grown table */ + } + lua_assert(n != dummynode); + othern = mainposition(t, key2tval(mp)); + if (othern != mp) { /* is colliding node out of its main position? */ + /* yes; move colliding node into free position */ + while (gnext(othern) != mp) othern = gnext(othern); /* find previous */ + gnext(othern) = n; /* redo the chain with `n' in place of `mp' */ + *n = *mp; /* copy colliding node into free pos. 
(mp->next also goes) */ + gnext(mp) = NULL; /* now `mp' is free */ + setnilvalue(gval(mp)); + } + else { /* colliding node is in its own main position */ + /* new node will go into free position */ + gnext(n) = gnext(mp); /* chain new position */ + gnext(mp) = n; + mp = n; + } + } + gkey(mp)->value = key->value; gkey(mp)->tt = key->tt; + luaC_barriert(L, t, key); + lua_assert(ttisnil(gval(mp))); + return gval(mp); +} + + +/* +** search function for integers +*/ +const TValue *luaH_getnum (Table *t, int key) { + /* (1 <= key && key <= t->sizearray) */ + if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray)) + return &t->array[key-1]; + else { + lua_Number nk = cast_num(key); + Node *n = hashnum(t, nk); + do { /* check whether `key' is somewhere in the chain */ + if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk)) + return gval(n); /* that's it */ + else n = gnext(n); + } while (n); + return luaO_nilobject; + } +} + + +/* +** search function for strings +*/ +const TValue *luaH_getstr (Table *t, TString *key) { + Node *n = hashstr(t, key); + do { /* check whether `key' is somewhere in the chain */ + if (ttisstring(gkey(n)) && rawtsvalue(gkey(n)) == key) + return gval(n); /* that's it */ + else n = gnext(n); + } while (n); + return luaO_nilobject; +} + + +/* +** main search function +*/ +const TValue *luaH_get (Table *t, const TValue *key) { + switch (ttype(key)) { + case LUA_TNIL: return luaO_nilobject; + case LUA_TSTRING: return luaH_getstr(t, rawtsvalue(key)); + case LUA_TNUMBER: { + int k; + lua_Number n = nvalue(key); + lua_number2int(k, n); + if (luai_numeq(cast_num(k), nvalue(key))) /* index is int? */ + return luaH_getnum(t, k); /* use specialized version */ + /* else go through */ + } + default: { + Node *n = mainposition(t, key); + do { /* check whether `key' is somewhere in the chain */ + if (luaO_rawequalObj(key2tval(n), key)) + return gval(n); /* that's it */ + else n = gnext(n); + } while (n); + return luaO_nilobject; + } + } +} + + +TValue *luaH_set (lua_State *L, Table *t, const TValue *key) { + const TValue *p = luaH_get(t, key); + t->flags = 0; + if (p != luaO_nilobject) + return cast(TValue *, p); + else { + if (ttisnil(key)) luaG_runerror(L, "table index is nil"); + else if (ttisnumber(key) && luai_numisnan(nvalue(key))) + luaG_runerror(L, "table index is NaN"); + return newkey(L, t, key); + } +} + + +TValue *luaH_setnum (lua_State *L, Table *t, int key) { + const TValue *p = luaH_getnum(t, key); + if (p != luaO_nilobject) + return cast(TValue *, p); + else { + TValue k; + setnvalue(&k, cast_num(key)); + return newkey(L, t, &k); + } +} + + +TValue *luaH_setstr (lua_State *L, Table *t, TString *key) { + const TValue *p = luaH_getstr(t, key); + if (p != luaO_nilobject) + return cast(TValue *, p); + else { + TValue k; + setsvalue(L, &k, key); + return newkey(L, t, &k); + } +} + + +static int unbound_search (Table *t, unsigned int j) { + unsigned int i = j; /* i is zero or a present index */ + j++; + /* find `i' and `j' such that i is present and j is not */ + while (!ttisnil(luaH_getnum(t, j))) { + i = j; + j *= 2; + if (j > cast(unsigned int, MAX_INT)) { /* overflow? */ + /* table was built with bad purposes: resort to linear search */ + i = 1; + while (!ttisnil(luaH_getnum(t, i))) i++; + return i - 1; + } + } + /* now do a binary search between them */ + while (j - i > 1) { + unsigned int m = (i+j)/2; + if (ttisnil(luaH_getnum(t, m))) j = m; + else i = m; + } + return i; +} + + +/* +** Try to find a boundary in table `t'. 
A `boundary' is an integer index +** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil). +*/ +int luaH_getn (Table *t) { + unsigned int j = t->sizearray; + if (j > 0 && ttisnil(&t->array[j - 1])) { + /* there is a boundary in the array part: (binary) search for it */ + unsigned int i = 0; + while (j - i > 1) { + unsigned int m = (i+j)/2; + if (ttisnil(&t->array[m - 1])) j = m; + else i = m; + } + return i; + } + /* else must find a boundary in hash part */ + else if (t->node == dummynode) /* hash part is empty? */ + return j; /* that is easy... */ + else return unbound_search(t, j); +} + + + +#if defined(LUA_DEBUG) + +Node *luaH_mainposition (const Table *t, const TValue *key) { + return mainposition(t, key); +} + +int luaH_isdummy (Node *n) { return n == dummynode; } + +#endif diff --git a/deps/lua/src/ltable.h b/deps/lua/src/ltable.h new file mode 100644 index 0000000000000000000000000000000000000000..09193cdbe048c9a19b8277f9c2bfae4af98591cf --- /dev/null +++ b/deps/lua/src/ltable.h @@ -0,0 +1,40 @@ +/* +** $Id: ltable.h,v 2.10 2006/01/10 13:13:06 roberto Exp $ +** Lua tables (hash) +** See Copyright Notice in lua.h +*/ + +#ifndef ltable_h +#define ltable_h + +#include "lobject.h" + + +#define gnode(t,i) (&(t)->node[i]) +#define gkey(n) (&(n)->i_key.nk) +#define gval(n) (&(n)->i_val) +#define gnext(n) ((n)->i_key.nk.next) + +#define key2tval(n) (&(n)->i_key.tvk) + + +LUAI_FUNC const TValue *luaH_getnum (Table *t, int key); +LUAI_FUNC TValue *luaH_setnum (lua_State *L, Table *t, int key); +LUAI_FUNC const TValue *luaH_getstr (Table *t, TString *key); +LUAI_FUNC TValue *luaH_setstr (lua_State *L, Table *t, TString *key); +LUAI_FUNC const TValue *luaH_get (Table *t, const TValue *key); +LUAI_FUNC TValue *luaH_set (lua_State *L, Table *t, const TValue *key); +LUAI_FUNC Table *luaH_new (lua_State *L, int narray, int lnhash); +LUAI_FUNC void luaH_resizearray (lua_State *L, Table *t, int nasize); +LUAI_FUNC void luaH_free (lua_State *L, Table *t); +LUAI_FUNC int luaH_next (lua_State *L, Table *t, StkId key); +LUAI_FUNC int luaH_getn (Table *t); + + +#if defined(LUA_DEBUG) +LUAI_FUNC Node *luaH_mainposition (const Table *t, const TValue *key); +LUAI_FUNC int luaH_isdummy (Node *n); +#endif + + +#endif diff --git a/deps/lua/src/ltablib.c b/deps/lua/src/ltablib.c new file mode 100644 index 0000000000000000000000000000000000000000..453b23b378bc5a215905cc974a9d981827b989a8 --- /dev/null +++ b/deps/lua/src/ltablib.c @@ -0,0 +1,278 @@ +/* +** $Id: ltablib.c,v 1.38 2005/10/23 17:38:15 roberto Exp $ +** Library for Table Manipulation +** See Copyright Notice in lua.h +*/ + + +#include + +#define ltablib_c +#define LUA_LIB + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + +#define aux_getn(L,n) (luaL_checktype(L, n, LUA_TTABLE), luaL_getn(L, n)) + + +static int foreachi (lua_State *L) { + int i; + int n = aux_getn(L, 1); + luaL_checktype(L, 2, LUA_TFUNCTION); + for (i=1; i <= n; i++) { + lua_pushvalue(L, 2); /* function */ + lua_pushinteger(L, i); /* 1st argument */ + lua_rawgeti(L, 1, i); /* 2nd argument */ + lua_call(L, 2, 1); + if (!lua_isnil(L, -1)) + return 1; + lua_pop(L, 1); /* remove nil result */ + } + return 0; +} + + +static int foreach (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); + luaL_checktype(L, 2, LUA_TFUNCTION); + lua_pushnil(L); /* first key */ + while (lua_next(L, 1)) { + lua_pushvalue(L, 2); /* function */ + lua_pushvalue(L, -3); /* key */ + lua_pushvalue(L, -3); /* value */ + lua_call(L, 2, 1); + if (!lua_isnil(L, -1)) + return 1; + 
lua_pop(L, 2); /* remove value and result */ + } + return 0; +} + + +static int maxn (lua_State *L) { + lua_Number max = 0; + luaL_checktype(L, 1, LUA_TTABLE); + lua_pushnil(L); /* first key */ + while (lua_next(L, 1)) { + lua_pop(L, 1); /* remove value */ + if (lua_type(L, -1) == LUA_TNUMBER) { + lua_Number v = lua_tonumber(L, -1); + if (v > max) max = v; + } + } + lua_pushnumber(L, max); + return 1; +} + + +static int getn (lua_State *L) { + lua_pushinteger(L, aux_getn(L, 1)); + return 1; +} + + +static int setn (lua_State *L) { + luaL_checktype(L, 1, LUA_TTABLE); +#ifndef luaL_setn + luaL_setn(L, 1, luaL_checkint(L, 2)); +#else + luaL_error(L, LUA_QL("setn") " is obsolete"); +#endif + lua_pushvalue(L, 1); + return 1; +} + + +static int tinsert (lua_State *L) { + int e = aux_getn(L, 1) + 1; /* first empty element */ + int pos; /* where to insert new element */ + switch (lua_gettop(L)) { + case 2: { /* called with only 2 arguments */ + pos = e; /* insert new element at the end */ + break; + } + case 3: { + int i; + pos = luaL_checkint(L, 2); /* 2nd argument is the position */ + if (pos > e) e = pos; /* `grow' array if necessary */ + for (i = e; i > pos; i--) { /* move up elements */ + lua_rawgeti(L, 1, i-1); + lua_rawseti(L, 1, i); /* t[i] = t[i-1] */ + } + break; + } + default: { + return luaL_error(L, "wrong number of arguments to " LUA_QL("insert")); + } + } + luaL_setn(L, 1, e); /* new size */ + lua_rawseti(L, 1, pos); /* t[pos] = v */ + return 0; +} + + +static int tremove (lua_State *L) { + int e = aux_getn(L, 1); + int pos = luaL_optint(L, 2, e); + if (e == 0) return 0; /* table is `empty' */ + luaL_setn(L, 1, e - 1); /* t.n = n-1 */ + lua_rawgeti(L, 1, pos); /* result = t[pos] */ + for ( ;pos= P */ + while (lua_rawgeti(L, 1, ++i), sort_comp(L, -1, -2)) { + if (i>u) luaL_error(L, "invalid order function for sorting"); + lua_pop(L, 1); /* remove a[i] */ + } + /* repeat --j until a[j] <= P */ + while (lua_rawgeti(L, 1, --j), sort_comp(L, -3, -1)) { + if (j + +#define ltm_c +#define LUA_CORE + +#include "lua.h" + +#include "lobject.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" + + + +const char *const luaT_typenames[] = { + "nil", "boolean", "userdata", "number", + "string", "table", "function", "userdata", "thread", + "proto", "upval" +}; + + +void luaT_init (lua_State *L) { + static const char *const luaT_eventname[] = { /* ORDER TM */ + "__index", "__newindex", + "__gc", "__mode", "__eq", + "__add", "__sub", "__mul", "__div", "__mod", + "__pow", "__unm", "__len", "__lt", "__le", + "__concat", "__call" + }; + int i; + for (i=0; itmname[i] = luaS_new(L, luaT_eventname[i]); + luaS_fix(G(L)->tmname[i]); /* never collect these names */ + } +} + + +/* +** function to be used with macro "fasttm": optimized for absence of +** tag methods +*/ +const TValue *luaT_gettm (Table *events, TMS event, TString *ename) { + const TValue *tm = luaH_getstr(events, ename); + lua_assert(event <= TM_EQ); + if (ttisnil(tm)) { /* no tag method? */ + events->flags |= cast_byte(1u<metatable; + break; + case LUA_TUSERDATA: + mt = uvalue(o)->metatable; + break; + default: + mt = G(L)->mt[ttype(o)]; + } + return (mt ? 
luaH_getstr(mt, G(L)->tmname[event]) : luaO_nilobject); +} + diff --git a/deps/lua/src/ltm.h b/deps/lua/src/ltm.h new file mode 100644 index 0000000000000000000000000000000000000000..866c79668d89673f350d37d0408b37bd94965448 --- /dev/null +++ b/deps/lua/src/ltm.h @@ -0,0 +1,54 @@ +/* +** $Id: ltm.h,v 2.6 2005/06/06 13:30:25 roberto Exp $ +** Tag methods +** See Copyright Notice in lua.h +*/ + +#ifndef ltm_h +#define ltm_h + + +#include "lobject.h" + + +/* +* WARNING: if you change the order of this enumeration, +* grep "ORDER TM" +*/ +typedef enum { + TM_INDEX, + TM_NEWINDEX, + TM_GC, + TM_MODE, + TM_EQ, /* last tag method with `fast' access */ + TM_ADD, + TM_SUB, + TM_MUL, + TM_DIV, + TM_MOD, + TM_POW, + TM_UNM, + TM_LEN, + TM_LT, + TM_LE, + TM_CONCAT, + TM_CALL, + TM_N /* number of elements in the enum */ +} TMS; + + + +#define gfasttm(g,et,e) ((et) == NULL ? NULL : \ + ((et)->flags & (1u<<(e))) ? NULL : luaT_gettm(et, e, (g)->tmname[e])) + +#define fasttm(l,et,e) gfasttm(G(l), et, e) + +LUAI_DATA const char *const luaT_typenames[]; + + +LUAI_FUNC const TValue *luaT_gettm (Table *events, TMS event, TString *ename); +LUAI_FUNC const TValue *luaT_gettmbyobj (lua_State *L, const TValue *o, + TMS event); +LUAI_FUNC void luaT_init (lua_State *L); + +#endif diff --git a/deps/lua/src/lua.c b/deps/lua/src/lua.c new file mode 100644 index 0000000000000000000000000000000000000000..6df527db30fbd04bd9597cb610622a7e17a5002c --- /dev/null +++ b/deps/lua/src/lua.c @@ -0,0 +1,377 @@ +/* +** $Id: lua.c,v 1.157 2005/12/29 16:23:32 roberto Exp $ +** Lua stand-alone interpreter +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include +#include + +#define lua_c + +#include "lua.h" + +#include "lauxlib.h" +#include "lualib.h" + + + +static lua_State *globalL = NULL; + +static const char *progname = LUA_PROGNAME; + + + +static void lstop (lua_State *L, lua_Debug *ar) { + (void)ar; /* unused arg. 
*/ + lua_sethook(L, NULL, 0, 0); + luaL_error(L, "interrupted!"); +} + + +static void laction (int i) { + signal(i, SIG_DFL); /* if another SIGINT happens before lstop, + terminate process (default action) */ + lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1); +} + + +static void print_usage (void) { + fprintf(stderr, + "usage: %s [options] [script [args]].\n" + "Available options are:\n" + " -e stat execute string " LUA_QL("stat") "\n" + " -l name require library " LUA_QL("name") "\n" + " -i enter interactive mode after executing " LUA_QL("script") "\n" + " -v show version information\n" + " -- stop handling options\n" + " - execute stdin and stop handling options\n" + , + progname); + fflush(stderr); +} + + +static void l_message (const char *pname, const char *msg) { + if (pname) fprintf(stderr, "%s: ", pname); + fprintf(stderr, "%s\n", msg); + fflush(stderr); +} + + +static int report (lua_State *L, int status) { + if (status && !lua_isnil(L, -1)) { + const char *msg = lua_tostring(L, -1); + if (msg == NULL) msg = "(error object is not a string)"; + l_message(progname, msg); + lua_pop(L, 1); + } + return status; +} + + +static int traceback (lua_State *L) { + lua_getfield(L, LUA_GLOBALSINDEX, "debug"); + if (!lua_istable(L, -1)) { + lua_pop(L, 1); + return 1; + } + lua_getfield(L, -1, "traceback"); + if (!lua_isfunction(L, -1)) { + lua_pop(L, 2); + return 1; + } + lua_pushvalue(L, 1); /* pass error message */ + lua_pushinteger(L, 2); /* skip this function and traceback */ + lua_call(L, 2, 1); /* call debug.traceback */ + return 1; +} + + +static int docall (lua_State *L, int narg, int clear) { + int status; + int base = lua_gettop(L) - narg; /* function index */ + lua_pushcfunction(L, traceback); /* push traceback function */ + lua_insert(L, base); /* put it under chunk and args */ + signal(SIGINT, laction); + status = lua_pcall(L, narg, (clear ? 0 : LUA_MULTRET), base); + signal(SIGINT, SIG_DFL); + lua_remove(L, base); /* remove traceback function */ + /* force a complete garbage collection in case of errors */ + if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); + return status; +} + + +static void print_version (void) { + l_message(NULL, LUA_VERSION " " LUA_COPYRIGHT); +} + + +static int getargs (lua_State *L, char **argv, int n) { + int narg; + int i; + int argc = 0; + while (argv[argc]) argc++; /* count total number of arguments */ + narg = argc - (n + 1); /* number of arguments to the script */ + luaL_checkstack(L, narg + 3, "too many arguments to script"); + for (i=n+1; i < argc; i++) + lua_pushstring(L, argv[i]); + lua_createtable(L, narg, n + 1); + for (i=0; i < argc; i++) { + lua_pushstring(L, argv[i]); + lua_rawseti(L, -2, i - n); + } + return narg; +} + + +static int dofile (lua_State *L, const char *name) { + int status = luaL_loadfile(L, name) || docall(L, 0, 1); + return report(L, status); +} + + +static int dostring (lua_State *L, const char *s, const char *name) { + int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1); + return report(L, status); +} + + +static int dolibrary (lua_State *L, const char *name) { + lua_getglobal(L, "require"); + lua_pushstring(L, name); + return report(L, lua_pcall(L, 1, 0, 0)); +} + + +static const char *get_prompt (lua_State *L, int firstline) { + const char *p; + lua_getfield(L, LUA_GLOBALSINDEX, firstline ? "_PROMPT" : "_PROMPT2"); + p = lua_tostring(L, -1); + if (p == NULL) p = (firstline ? 
LUA_PROMPT : LUA_PROMPT2); + lua_pop(L, 1); /* remove global */ + return p; +} + + +static int incomplete (lua_State *L, int status) { + if (status == LUA_ERRSYNTAX) { + size_t lmsg; + const char *msg = lua_tolstring(L, -1, &lmsg); + const char *tp = msg + lmsg - (sizeof(LUA_QL("")) - 1); + if (strstr(msg, LUA_QL("")) == tp) { + lua_pop(L, 1); + return 1; + } + } + return 0; /* else... */ +} + + +static int pushline (lua_State *L, int firstline) { + char buffer[LUA_MAXINPUT]; + char *b = buffer; + size_t l; + const char *prmt = get_prompt(L, firstline); + if (lua_readline(L, b, prmt) == 0) + return 0; /* no input */ + l = strlen(b); + if (l > 0 && b[l-1] == '\n') /* line ends with newline? */ + b[l-1] = '\0'; /* remove it */ + if (firstline && b[0] == '=') /* first line starts with `=' ? */ + lua_pushfstring(L, "return %s", b+1); /* change it to `return' */ + else + lua_pushstring(L, b); + lua_freeline(L, b); + return 1; +} + + +static int loadline (lua_State *L) { + int status; + lua_settop(L, 0); + if (!pushline(L, 1)) + return -1; /* no input */ + for (;;) { /* repeat until gets a complete line */ + status = luaL_loadbuffer(L, lua_tostring(L, 1), lua_strlen(L, 1), "=stdin"); + if (!incomplete(L, status)) break; /* cannot try to add lines? */ + if (!pushline(L, 0)) /* no more input? */ + return -1; + lua_pushliteral(L, "\n"); /* add a new line... */ + lua_insert(L, -2); /* ...between the two lines */ + lua_concat(L, 3); /* join them */ + } + lua_saveline(L, 1); + lua_remove(L, 1); /* remove line */ + return status; +} + + +static void dotty (lua_State *L) { + int status; + const char *oldprogname = progname; + progname = NULL; + while ((status = loadline(L)) != -1) { + if (status == 0) status = docall(L, 0, 0); + report(L, status); + if (status == 0 && lua_gettop(L) > 0) { /* any result to print? */ + lua_getglobal(L, "print"); + lua_insert(L, 1); + if (lua_pcall(L, lua_gettop(L)-1, 0, 0) != 0) + l_message(progname, lua_pushfstring(L, + "error calling " LUA_QL("print") " (%s)", + lua_tostring(L, -1))); + } + } + lua_settop(L, 0); /* clear stack */ + fputs("\n", stdout); + fflush(stdout); + progname = oldprogname; +} + + +static int handle_script (lua_State *L, char **argv, int n) { + int status; + const char *fname; + int narg = getargs(L, argv, n); /* collect arguments */ + lua_setglobal(L, "arg"); + fname = argv[n]; + if (strcmp(fname, "-") == 0 && strcmp(argv[n-1], "--") != 0) + fname = NULL; /* stdin */ + status = luaL_loadfile(L, fname); + lua_insert(L, -(narg+1)); + if (status == 0) + status = docall(L, narg, 0); + else + lua_pop(L, narg); + return report(L, status); +} + + +static int collectargs (char **argv, int *pi, int *pv, int *pe) { + int i; + for (i = 1; argv[i] != NULL; i++) { + if (argv[i][0] != '-') /* not an option? */ + return i; + switch (argv[i][1]) { /* option */ + case '-': return (argv[i+1] != NULL ? 
i+1 : 0); + case '\0': return i; + case 'i': *pi = 1; /* go through */ + case 'v': *pv = 1; break; + case 'e': *pe = 1; /* go through */ + case 'l': + if (argv[i][2] == '\0') { + i++; + if (argv[i] == NULL) return -1; + } + break; + default: return -1; /* invalid option */ + } + } + return 0; +} + + +static int runargs (lua_State *L, char **argv, int n) { + int i; + for (i = 1; i < n; i++) { + if (argv[i] == NULL) continue; + lua_assert(argv[i][0] == '-'); + switch (argv[i][1]) { /* option */ + case 'e': { + const char *chunk = argv[i] + 2; + if (*chunk == '\0') chunk = argv[++i]; + lua_assert(chunk != NULL); + if (dostring(L, chunk, "=(command line)") != 0) + return 1; + break; + } + case 'l': { + const char *filename = argv[i] + 2; + if (*filename == '\0') filename = argv[++i]; + lua_assert(filename != NULL); + if (dolibrary(L, filename)) + return 1; /* stop if file fails */ + break; + } + default: break; + } + } + return 0; +} + + +static int handle_luainit (lua_State *L) { + const char *init = getenv("LUA_INIT"); + if (init == NULL) return 0; /* status OK */ + else if (init[0] == '@') + return dofile(L, init+1); + else + return dostring(L, init, "=LUA_INIT"); +} + + +struct Smain { + int argc; + char **argv; + int status; +}; + + +static int pmain (lua_State *L) { + struct Smain *s = (struct Smain *)lua_touserdata(L, 1); + char **argv = s->argv; + int script; + int has_i = 0, has_v = 0, has_e = 0; + globalL = L; + if (argv[0] && argv[0][0]) progname = argv[0]; + lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ + luaL_openlibs(L); /* open libraries */ + lua_gc(L, LUA_GCRESTART, 0); + s->status = handle_luainit(L); + if (s->status != 0) return 0; + script = collectargs(argv, &has_i, &has_v, &has_e); + if (script < 0) { /* invalid args? */ + print_usage(); + s->status = 1; + return 0; + } + if (has_v) print_version(); + s->status = runargs(L, argv, (script > 0) ? script : s->argc); + if (s->status != 0) return 0; + if (script) + s->status = handle_script(L, argv, script); + if (s->status != 0) return 0; + if (has_i) + dotty(L); + else if (script == 0 && !has_e && !has_v) { + if (lua_stdin_is_tty()) { + print_version(); + dotty(L); + } + else dofile(L, NULL); /* executes stdin as a file */ + } + return 0; +} + + +int main (int argc, char **argv) { + int status; + struct Smain s; + lua_State *L = lua_open(); /* create state */ + if (L == NULL) { + l_message(argv[0], "cannot create state: not enough memory"); + return EXIT_FAILURE; + } + s.argc = argc; + s.argv = argv; + status = lua_cpcall(L, &pmain, &s); + report(L, status); + lua_close(L); + return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS; +} + diff --git a/deps/lua/src/lua.h b/deps/lua/src/lua.h new file mode 100644 index 0000000000000000000000000000000000000000..881f834555945d01b101984eb2eea22881258dee --- /dev/null +++ b/deps/lua/src/lua.h @@ -0,0 +1,384 @@ +/* +** $Id: lua.h,v 1.216 2006/01/10 12:50:13 roberto Exp $ +** Lua - An Extensible Extension Language +** Lua.org, PUC-Rio, Brazil (http://www.lua.org) +** See Copyright Notice at the end of this file +*/ + + +#ifndef lua_h +#define lua_h + +#include +#include + + +#include "luaconf.h" + + +#define LUA_VERSION "Lua 5.1" +#define LUA_VERSION_NUM 501 +#define LUA_COPYRIGHT "Copyright (C) 1994-2006 Lua.org, PUC-Rio" +#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. 
Celes" + + +/* mark for precompiled code (`Lua') */ +#define LUA_SIGNATURE "\033Lua" + +/* option for multiple returns in `lua_pcall' and `lua_call' */ +#define LUA_MULTRET (-1) + + +/* +** pseudo-indices +*/ +#define LUA_REGISTRYINDEX (-10000) +#define LUA_ENVIRONINDEX (-10001) +#define LUA_GLOBALSINDEX (-10002) +#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) + + +/* thread status; 0 is OK */ +#define LUA_YIELD 1 +#define LUA_ERRRUN 2 +#define LUA_ERRSYNTAX 3 +#define LUA_ERRMEM 4 +#define LUA_ERRERR 5 + + +typedef struct lua_State lua_State; + +typedef int (*lua_CFunction) (lua_State *L); + + +/* +** functions that read/write blocks when loading/dumping Lua chunks +*/ +typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); + +typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); + + +/* +** prototype for memory-allocation functions +*/ +typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); + + +/* +** basic types +*/ +#define LUA_TNONE (-1) + +#define LUA_TNIL 0 +#define LUA_TBOOLEAN 1 +#define LUA_TLIGHTUSERDATA 2 +#define LUA_TNUMBER 3 +#define LUA_TSTRING 4 +#define LUA_TTABLE 5 +#define LUA_TFUNCTION 6 +#define LUA_TUSERDATA 7 +#define LUA_TTHREAD 8 + + + +/* minimum Lua stack available to a C function */ +#define LUA_MINSTACK 20 + + +/* +** generic extra include file +*/ +#if defined(LUA_USER_H) +#include LUA_USER_H +#endif + + +/* type of numbers in Lua */ +typedef LUA_NUMBER lua_Number; + + +/* type for integer functions */ +typedef LUA_INTEGER lua_Integer; + + + +/* +** state manipulation +*/ +LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); +LUA_API void (lua_close) (lua_State *L); +LUA_API lua_State *(lua_newthread) (lua_State *L); + +LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); + + +/* +** basic stack manipulation +*/ +LUA_API int (lua_gettop) (lua_State *L); +LUA_API void (lua_settop) (lua_State *L, int idx); +LUA_API void (lua_pushvalue) (lua_State *L, int idx); +LUA_API void (lua_remove) (lua_State *L, int idx); +LUA_API void (lua_insert) (lua_State *L, int idx); +LUA_API void (lua_replace) (lua_State *L, int idx); +LUA_API int (lua_checkstack) (lua_State *L, int sz); + +LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n); + + +/* +** access functions (stack -> C) +*/ + +LUA_API int (lua_isnumber) (lua_State *L, int idx); +LUA_API int (lua_isstring) (lua_State *L, int idx); +LUA_API int (lua_iscfunction) (lua_State *L, int idx); +LUA_API int (lua_isuserdata) (lua_State *L, int idx); +LUA_API int (lua_type) (lua_State *L, int idx); +LUA_API const char *(lua_typename) (lua_State *L, int tp); + +LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2); + +LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); +LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx); +LUA_API int (lua_toboolean) (lua_State *L, int idx); +LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); +LUA_API size_t (lua_objlen) (lua_State *L, int idx); +LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); +LUA_API void *(lua_touserdata) (lua_State *L, int idx); +LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); +LUA_API const void *(lua_topointer) (lua_State *L, int idx); + + +/* +** push functions (C -> stack) +*/ +LUA_API void (lua_pushnil) (lua_State *L); +LUA_API void (lua_pushnumber) (lua_State *L, 
lua_Number n); +LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); +LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); +LUA_API void (lua_pushstring) (lua_State *L, const char *s); +LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, + va_list argp); +LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); +LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); +LUA_API void (lua_pushboolean) (lua_State *L, int b); +LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); +LUA_API int (lua_pushthread) (lua_State *L); + + +/* +** get functions (Lua -> stack) +*/ +LUA_API void (lua_gettable) (lua_State *L, int idx); +LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawget) (lua_State *L, int idx); +LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); +LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); +LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); +LUA_API int (lua_getmetatable) (lua_State *L, int objindex); +LUA_API void (lua_getfenv) (lua_State *L, int idx); + + +/* +** set functions (stack -> Lua) +*/ +LUA_API void (lua_settable) (lua_State *L, int idx); +LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawset) (lua_State *L, int idx); +LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); +LUA_API int (lua_setmetatable) (lua_State *L, int objindex); +LUA_API int (lua_setfenv) (lua_State *L, int idx); + + +/* +** `load' and `call' functions (load and run Lua code) +*/ +LUA_API void (lua_call) (lua_State *L, int nargs, int nresults); +LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); +LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); +LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname); + +LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data); + + +/* +** coroutine functions +*/ +LUA_API int (lua_yield) (lua_State *L, int nresults); +LUA_API int (lua_resume) (lua_State *L, int narg); +LUA_API int (lua_status) (lua_State *L); + +/* +** garbage-collection function and options +*/ + +#define LUA_GCSTOP 0 +#define LUA_GCRESTART 1 +#define LUA_GCCOLLECT 2 +#define LUA_GCCOUNT 3 +#define LUA_GCCOUNTB 4 +#define LUA_GCSTEP 5 +#define LUA_GCSETPAUSE 6 +#define LUA_GCSETSTEPMUL 7 + +LUA_API int (lua_gc) (lua_State *L, int what, int data); + + +/* +** miscellaneous functions +*/ + +LUA_API int (lua_error) (lua_State *L); + +LUA_API int (lua_next) (lua_State *L, int idx); + +LUA_API void (lua_concat) (lua_State *L, int n); + +LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); + + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define lua_pop(L,n) lua_settop(L, -(n)-1) + +#define lua_newtable(L) lua_createtable(L, 0, 0) + +#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) + +#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) + +#define lua_strlen(L,i) lua_objlen(L, (i)) + +#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) +#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) +#define lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) +#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) +#define lua_isboolean(L,n) 
(lua_type(L, (n)) == LUA_TBOOLEAN) +#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) +#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) +#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) + +#define lua_pushliteral(L, s) \ + lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) + +#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) +#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) + +#define lua_tostring(L,i) lua_tolstring(L, (i), NULL) + + + +/* +** compatibility macros and functions +*/ + +#define lua_open() luaL_newstate() + +#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) + +#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) + +#define lua_Chunkreader lua_Reader +#define lua_Chunkwriter lua_Writer + + + +/* +** {====================================================================== +** Debug API +** ======================================================================= +*/ + + +/* +** Event codes +*/ +#define LUA_HOOKCALL 0 +#define LUA_HOOKRET 1 +#define LUA_HOOKLINE 2 +#define LUA_HOOKCOUNT 3 +#define LUA_HOOKTAILRET 4 + + +/* +** Event masks +*/ +#define LUA_MASKCALL (1 << LUA_HOOKCALL) +#define LUA_MASKRET (1 << LUA_HOOKRET) +#define LUA_MASKLINE (1 << LUA_HOOKLINE) +#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) + +typedef struct lua_Debug lua_Debug; /* activation record */ + + +/* Functions to be called by the debuger in specific events */ +typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); + + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); +LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n); + +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); +LUA_API lua_Hook lua_gethook (lua_State *L); +LUA_API int lua_gethookmask (lua_State *L); +LUA_API int lua_gethookcount (lua_State *L); + + +struct lua_Debug { + int event; + const char *name; /* (n) */ + const char *namewhat; /* (n) `global', `local', `field', `method' */ + const char *what; /* (S) `Lua', `C', `main', `tail' */ + const char *source; /* (S) */ + int currentline; /* (l) */ + int nups; /* (u) number of upvalues */ + int linedefined; /* (S) */ + int lastlinedefined; /* (S) */ + char short_src[LUA_IDSIZE]; /* (S) */ + /* private part */ + int i_ci; /* active function */ +}; + +/* }====================================================================== */ + + +/****************************************************************************** +* Copyright (C) 1994-2006 Lua.org, PUC-Rio. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +******************************************************************************/ + + +#endif diff --git a/deps/lua/src/luac.c b/deps/lua/src/luac.c new file mode 100644 index 0000000000000000000000000000000000000000..2dd76b76535f75e35271f465909307551afdf5be --- /dev/null +++ b/deps/lua/src/luac.c @@ -0,0 +1,196 @@ +/* +** $Id: luac.c,v 1.52 2005/11/11 14:03:13 lhf Exp $ +** Lua compiler (saves bytecodes to files; also list bytecodes) +** See Copyright Notice in lua.h +*/ + +#include +#include +#include +#include + +#define luac_c +#define LUA_CORE + +#include "lua.h" +#include "lauxlib.h" + +#include "ldo.h" +#include "lfunc.h" +#include "lmem.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lstring.h" +#include "lundump.h" + +#define PROGNAME "luac" /* default program name */ +#define OUTPUT PROGNAME ".out" /* default output file */ + +static int listing=0; /* list bytecodes? */ +static int dumping=1; /* dump bytecodes? */ +static int stripping=0; /* strip debug information? */ +static char Output[]={ OUTPUT }; /* default output file name */ +static const char* output=Output; /* actual output file name */ +static const char* progname=PROGNAME; /* actual program name */ + +static void fatal(const char* message) +{ + fprintf(stderr,"%s: %s\n",progname,message); + exit(EXIT_FAILURE); +} + +static void cannot(const char* what) +{ + fprintf(stderr,"%s: cannot %s %s: %s\n",progname,what,output,strerror(errno)); + exit(EXIT_FAILURE); +} + +static void usage(const char* message) +{ + if (*message=='-') + fprintf(stderr,"%s: unrecognized option " LUA_QS "\n",progname,message); + else + fprintf(stderr,"%s: %s\n",progname,message); + fprintf(stderr, + "usage: %s [options] [filenames].\n" + "Available options are:\n" + " - process stdin\n" + " -l list\n" + " -o name output to file " LUA_QL("name") " (default is \"%s\")\n" + " -p parse only\n" + " -s strip debug information\n" + " -v show version information\n" + " -- stop handling options\n", + progname,Output); + exit(EXIT_FAILURE); +} + +#define IS(s) (strcmp(argv[i],s)==0) + +static int doargs(int argc, char* argv[]) +{ + int i; + if (argv[0]!=NULL && *argv[0]!=0) progname=argv[0]; + for (i=1; itop+(i))->l.p) + +static Proto* combine(lua_State* L, int n) +{ + if (n==1) + return toproto(L,-1); + else + { + int i,pc; + Proto* f=luaF_newproto(L); + setptvalue2s(L,L->top,f); incr_top(L); + f->source=luaS_newliteral(L,"=(" PROGNAME ")"); + f->maxstacksize=1; + pc=2*n+1; + f->code=luaM_newvector(L,pc,Instruction); + f->sizecode=pc; + f->p=luaM_newvector(L,n,Proto*); + f->sizep=n; + pc=0; + for (i=0; ip[i]=toproto(L,i-n-1); + f->code[pc++]=CREATE_ABx(OP_CLOSURE,0,i); + f->code[pc++]=CREATE_ABC(OP_CALL,0,1,1); + } + f->code[pc++]=CREATE_ABC(OP_RETURN,0,1,0); + return f; + } +} + +static int writer(lua_State* L, const void* p, size_t size, void* u) +{ + UNUSED(L); + return (fwrite(p,size,1,(FILE*)u)!=1) && (size!=0); +} + +struct Smain { + int argc; + char** argv; +}; + +static int pmain(lua_State* L) +{ + struct Smain* s = (struct Smain*)lua_touserdata(L, 1); + int 
argc=s->argc; + char** argv=s->argv; + Proto* f; + int i; + if (!lua_checkstack(L,argc)) fatal("too many input files"); + for (i=0; i1); + if (dumping) + { + FILE* D= (output==NULL) ? stdout : fopen(output,"wb"); + if (D==NULL) cannot("open"); + lua_lock(L); + luaU_dump(L,f,writer,D,stripping); + lua_unlock(L); + if (ferror(D)) cannot("write"); + if (fclose(D)) cannot("close"); + } + return 0; +} + +int main(int argc, char* argv[]) +{ + lua_State* L; + struct Smain s; + int i=doargs(argc,argv); + argc-=i; argv+=i; + if (argc<=0) usage("no input files given"); + L=lua_open(); + if (L==NULL) fatal("not enough memory for state"); + s.argc=argc; + s.argv=argv; + if (lua_cpcall(L,pmain,&s)!=0) fatal(lua_tostring(L,-1)); + lua_close(L); + return EXIT_SUCCESS; +} diff --git a/deps/lua/src/luaconf.h b/deps/lua/src/luaconf.h new file mode 100644 index 0000000000000000000000000000000000000000..97a3e30c0eb1cf207125434c1bbda5966f0d209f --- /dev/null +++ b/deps/lua/src/luaconf.h @@ -0,0 +1,736 @@ +/* +** $Id: luaconf.h,v 1.81 2006/02/10 17:44:06 roberto Exp $ +** Configuration file for Lua +** See Copyright Notice in lua.h +*/ + + +#ifndef lconfig_h +#define lconfig_h + +#include +#include + + +/* +** ================================================================== +** Search for "@@" to find all configurable definitions. +** =================================================================== +*/ + + +/* +@@ LUA_ANSI controls the use of non-ansi features. +** CHANGE it (define it) if you want Lua to avoid the use of any +** non-ansi feature or library. +*/ +#if defined(__STRICT_ANSI__) +#define LUA_ANSI +#endif + + +#if !defined(LUA_ANSI) && defined(_WIN32) +#define LUA_WIN +#endif + +#if defined(LUA_USE_LINUX) +#define LUA_USE_POSIX +#define LUA_USE_DLOPEN /* needs an extra library: -ldl */ +#define LUA_USE_READLINE /* needs some extra libraries */ +#endif + +#if defined(LUA_USE_MACOSX) +#define LUA_USE_POSIX +#define LUA_DL_DYLD /* does not need extra library */ +#endif + + + +/* +@@ LUA_USE_POSIX includes all functionallity listed as X/Open System +@* Interfaces Extension (XSI). +** CHANGE it (define it) if your system is XSI compatible. +*/ +#if defined(LUA_USE_POSIX) +#define LUA_USE_MKSTEMP +#define LUA_USE_ISATTY +#define LUA_USE_POPEN +#define LUA_USE_ULONGJMP +#endif + + +/* +@@ LUA_PATH_DEFAULT is the default path that Lua uses to look for +@* Lua libraries. +@@ LUA_CPATH_DEFAULT is the default path that Lua uses to look for +@* C libraries. +** CHANGE them if your machine has a non-conventional directory +** hierarchy or if you want to install your libraries in +** non-conventional directories. +*/ +#if defined(_WIN32) +/* +** In Windows, any exclamation mark ('!') in the path is replaced by the +** path of the directory of the executable file of the current process. +*/ +#define LUA_LDIR "!\\lua\\" +#define LUA_CDIR "!\\" +#define LUA_PATH_DEFAULT \ + ".\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" \ + LUA_CDIR"?.lua;" LUA_CDIR"?\\init.lua" +#define LUA_CPATH_DEFAULT \ + ".\\?.dll;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll" + +#else +#define LUA_ROOT "/usr/local/" +#define LUA_LDIR LUA_ROOT "share/lua/5.1/" +#define LUA_CDIR LUA_ROOT "lib/lua/5.1/" +#define LUA_PATH_DEFAULT \ + "./?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?/init.lua;" \ + LUA_CDIR"?.lua;" LUA_CDIR"?/init.lua" +#define LUA_CPATH_DEFAULT \ + "./?.so;" LUA_CDIR"?.so;" LUA_CDIR"loadall.so" +#endif + + +/* +@@ LUA_DIRSEP is the directory separator (for submodules). 
+** CHANGE it if your machine does not use "/" as the directory separator +** and is not Windows. (On Windows Lua automatically uses "\".) +*/ +#if defined(_WIN32) +#define LUA_DIRSEP "\\" +#else +#define LUA_DIRSEP "/" +#endif + + +/* +@@ LUA_PATHSEP is the character that separates templates in a path. +@@ LUA_PATH_MARK is the string that marks the substitution points in a +@* template. +@@ LUA_EXECDIR in a Windows path is replaced by the executable's +@* directory. +@@ LUA_IGMARK is a mark to ignore all before it when bulding the +@* luaopen_ function name. +** CHANGE them if for some reason your system cannot use those +** characters. (E.g., if one of those characters is a common character +** in file/directory names.) Probably you do not need to change them. +*/ +#define LUA_PATHSEP ";" +#define LUA_PATH_MARK "?" +#define LUA_EXECDIR "!" +#define LUA_IGMARK "-" + + +/* +@@ LUA_INTEGER is the integral type used by lua_pushinteger/lua_tointeger. +** CHANGE that if ptrdiff_t is not adequate on your machine. (On most +** machines, ptrdiff_t gives a good choice between int or long.) +*/ +#define LUA_INTEGER ptrdiff_t + + +/* +@@ LUA_API is a mark for all core API functions. +@@ LUALIB_API is a mark for all standard library functions. +** CHANGE them if you need to define those functions in some special way. +** For instance, if you want to create one Windows DLL with the core and +** the libraries, you may want to use the following definition (define +** LUA_BUILD_AS_DLL to get it). +*/ +#if defined(LUA_BUILD_AS_DLL) + +#if defined(LUA_CORE) || defined(LUA_LIB) +#define LUA_API __declspec(dllexport) +#else +#define LUA_API __declspec(dllimport) +#endif + +#else + +#define LUA_API extern + +#endif + +/* more often than not the libs go together with the core */ +#define LUALIB_API LUA_API + + +/* +@@ LUAI_FUNC is a mark for all extern functions that are not to be +@* exported to outside modules. +@@ LUAI_DATA is a mark for all extern (const) variables that are not to +@* be exported to outside modules. +** CHANGE them if you need to mark them in some special way. Elf/gcc +** (versions 3.2 and later) mark them as "hidden" to optimize access +** when Lua is compiled as a shared library. +*/ +#if defined(luaall_c) +#define LUAI_FUNC static +#define LUAI_DATA /* empty */ + +#elif defined(__GNUC__) && ((__GNUC__*100 + __GNUC_MINOR__) >= 302) && \ + defined(__ELF__) +#define LUAI_FUNC __attribute__((visibility("hidden"))) extern +#define LUAI_DATA LUAI_FUNC + +#else +#define LUAI_FUNC extern +#define LUAI_DATA extern +#endif + + + +/* +@@ LUA_QL describes how error messages quote program elements. +** CHANGE it if you want a different appearance. +*/ +#define LUA_QL(x) "'" x "'" +#define LUA_QS LUA_QL("%s") + + +/* +@@ LUA_IDSIZE gives the maximum size for the description of the source +@* of a function in debug information. +** CHANGE it if you want a different size. +*/ +#define LUA_IDSIZE 60 + + +/* +** {================================================================== +** Stand-alone configuration +** =================================================================== +*/ + +#if defined(lua_c) || defined(luaall_c) + +/* +@@ lua_stdin_is_tty detects whether the standard input is a 'tty' (that +@* is, whether we're running lua interactively). +** CHANGE it if you have a better definition for non-POSIX/non-Windows +** systems. 
+*/ +#if defined(LUA_USE_ISATTY) +#include +#define lua_stdin_is_tty() isatty(0) +#elif defined(LUA_WIN) +#include +#include +#define lua_stdin_is_tty() _isatty(_fileno(stdin)) +#else +#define lua_stdin_is_tty() 1 /* assume stdin is a tty */ +#endif + + +/* +@@ LUA_PROMPT is the default prompt used by stand-alone Lua. +@@ LUA_PROMPT2 is the default continuation prompt used by stand-alone Lua. +** CHANGE them if you want different prompts. (You can also change the +** prompts dynamically, assigning to globals _PROMPT/_PROMPT2.) +*/ +#define LUA_PROMPT "> " +#define LUA_PROMPT2 ">> " + + +/* +@@ LUA_PROGNAME is the default name for the stand-alone Lua program. +** CHANGE it if your stand-alone interpreter has a different name and +** your system is not able to detect that name automatically. +*/ +#define LUA_PROGNAME "lua" + + +/* +@@ LUA_MAXINPUT is the maximum length for an input line in the +@* stand-alone interpreter. +** CHANGE it if you need longer lines. +*/ +#define LUA_MAXINPUT 512 + + +/* +@@ lua_readline defines how to show a prompt and then read a line from +@* the standard input. +@@ lua_saveline defines how to "save" a read line in a "history". +@@ lua_freeline defines how to free a line read by lua_readline. +** CHANGE them if you want to improve this functionality (e.g., by using +** GNU readline and history facilities). +*/ +#if defined(LUA_USE_READLINE) +#include +#include +#include +#define lua_readline(L,b,p) ((void)L, ((b)=readline(p)) != NULL) +#define lua_saveline(L,idx) \ + if (lua_strlen(L,idx) > 0) /* non-empty line? */ \ + add_history(lua_tostring(L, idx)); /* add it to history */ +#define lua_freeline(L,b) ((void)L, free(b)) +#else +#define lua_readline(L,b,p) \ + ((void)L, fputs(p, stdout), fflush(stdout), /* show prompt */ \ + fgets(b, LUA_MAXINPUT, stdin) != NULL) /* get line */ +#define lua_saveline(L,idx) { (void)L; (void)idx; } +#define lua_freeline(L,b) { (void)L; (void)b; } +#endif + +#endif + +/* }================================================================== */ + + +/* +@@ LUAI_GCPAUSE defines the default pause between garbage-collector cycles +@* as a percentage. +** CHANGE it if you want the GC to run faster or slower (higher values +** mean larger pauses which mean slower collection.) You can also change +** this value dynamically. +*/ +#define LUAI_GCPAUSE 200 /* 200% (wait memory to double before next GC) */ + + +/* +@@ LUAI_GCMUL defines the default speed of garbage collection relative to +@* memory allocation as a percentage. +** CHANGE it if you want to change the granularity of the garbage +** collection. (Higher values mean coarser collections. 0 represents +** infinity, where each step performs a full collection.) You can also +** change this value dynamically. +*/ +#define LUAI_GCMUL 200 /* GC runs 'twice the speed' of memory allocation */ + + + +/* +@@ LUA_COMPAT_GETN controls compatibility with old getn behavior. +** CHANGE it (define it) if you want exact compatibility with the +** behavior of setn/getn in Lua 5.0. +*/ +#undef LUA_COMPAT_GETN + +/* +@@ LUA_COMPAT_LOADLIB controls compatibility about global loadlib. +** CHANGE it to undefined as soon as you do not need a global 'loadlib' +** function (the function is still available as 'package.loadlib'). +*/ +#undef LUA_COMPAT_LOADLIB + +/* +@@ LUA_COMPAT_VARARG controls compatibility with old vararg feature. +** CHANGE it to undefined as soon as your programs use only '...' to +** access vararg parameters (instead of the old 'arg' table). 
+*/ +#define LUA_COMPAT_VARARG + +/* +@@ LUA_COMPAT_MOD controls compatibility with old math.mod function. +** CHANGE it to undefined as soon as your programs use 'math.fmod' or +** the new '%' operator instead of 'math.mod'. +*/ +#define LUA_COMPAT_MOD + +/* +@@ LUA_COMPAT_LSTR controls compatibility with old long string nesting +@* facility. +** CHANGE it to 2 if you want the old behaviour, or undefine it to turn +** off the advisory error when nesting [[...]]. +*/ +#define LUA_COMPAT_LSTR 1 + +/* +@@ LUA_COMPAT_GFIND controls compatibility with old 'string.gfind' name. +** CHANGE it to undefined as soon as you rename 'string.gfind' to +** 'string.gmatch'. +*/ +#define LUA_COMPAT_GFIND + +/* +@@ LUA_COMPAT_OPENLIB controls compatibility with old 'luaL_openlib' +@* behavior. +** CHANGE it to undefined as soon as you replace to 'luaL_registry' +** your uses of 'luaL_openlib' +*/ +#define LUA_COMPAT_OPENLIB + + + +/* +@@ luai_apicheck is the assert macro used by the Lua-C API. +** CHANGE luai_apicheck if you want Lua to perform some checks in the +** parameters it gets from API calls. This may slow down the interpreter +** a bit, but may be quite useful when debugging C code that interfaces +** with Lua. A useful redefinition is to use assert.h. +*/ +#if defined(LUA_USE_APICHECK) +#include +#define luai_apicheck(L,o) { (void)L; assert(o); } +#else +#define luai_apicheck(L,o) { (void)L; } +#endif + + +/* +@@ LUAI_BITSINT defines the number of bits in an int. +** CHANGE here if Lua cannot automatically detect the number of bits of +** your machine. Probably you do not need to change this. +*/ +/* avoid overflows in comparison */ +#if INT_MAX-20 < 32760 +#define LUAI_BITSINT 16 +#elif INT_MAX > 2147483640L +/* int has at least 32 bits */ +#define LUAI_BITSINT 32 +#else +#error "you must define LUA_BITSINT with number of bits in an integer" +#endif + + +/* +@@ LUAI_UINT32 is an unsigned integer with at least 32 bits. +@@ LUAI_INT32 is an signed integer with at least 32 bits. +@@ LUAI_UMEM is an unsigned integer big enough to count the total +@* memory used by Lua. +@@ LUAI_MEM is a signed integer big enough to count the total memory +@* used by Lua. +** CHANGE here if for some weird reason the default definitions are not +** good enough for your machine. (The definitions in the 'else' +** part always works, but may waste space on machines with 64-bit +** longs.) Probably you do not need to change this. +*/ +#if LUAI_BITSINT >= 32 +#define LUAI_UINT32 unsigned int +#define LUAI_INT32 int +#define LUAI_MAXINT32 INT_MAX +#define LUAI_UMEM size_t +#define LUAI_MEM ptrdiff_t +#else +/* 16-bit ints */ +#define LUAI_UINT32 unsigned long +#define LUAI_INT32 long +#define LUAI_MAXINT32 LONG_MAX +#define LUAI_UMEM unsigned long +#define LUAI_MEM long +#endif + + +/* +@@ LUAI_MAXCALLS limits the number of nested calls. +** CHANGE it if you need really deep recursive calls. This limit is +** arbitrary; its only purpose is to stop infinite recursion before +** exhausting memory. +*/ +#define LUAI_MAXCALLS 20000 + + +/* +@@ LUAI_MAXCSTACK limits the number of Lua stack slots that a C function +@* can use. +** CHANGE it if you need lots of (Lua) stack space for your C +** functions. This limit is arbitrary; its only purpose is to stop C +** functions to consume unlimited stack space. +*/ +#define LUAI_MAXCSTACK 2048 + + + +/* +** {================================================================== +** CHANGE (to smaller values) the following definitions if your system +** has a small C stack. 
(Or you may want to change them to larger +** values if your system has a large C stack and these limits are +** too rigid for you.) Some of these constants control the size of +** stack-allocated arrays used by the compiler or the interpreter, while +** others limit the maximum number of recursive calls that the compiler +** or the interpreter can perform. Values too large may cause a C stack +** overflow for some forms of deep constructs. +** =================================================================== +*/ + + +/* +@@ LUAI_MAXCCALLS is the maximum depth for nested C calls (short) and +@* syntactical nested non-terminals in a program. +*/ +#define LUAI_MAXCCALLS 200 + + +/* +@@ LUAI_MAXVARS is the maximum number of local variables per function +@* (must be smaller than 250). +*/ +#define LUAI_MAXVARS 200 + + +/* +@@ LUAI_MAXUPVALUES is the maximum number of upvalues per function +@* (must be smaller than 250). +*/ +#define LUAI_MAXUPVALUES 60 + + +/* +@@ LUAL_BUFFERSIZE is the buffer size used by the lauxlib buffer system. +*/ +#define LUAL_BUFFERSIZE BUFSIZ + +/* }================================================================== */ + + + + +/* +** {================================================================== +@@ LUA_NUMBER is the type of numbers in Lua. +** CHANGE the following definitions only if you want to build Lua +** with a number type different from double. You may also need to +** change lua_number2int & lua_number2integer. +** =================================================================== +*/ + +#define LUA_NUMBER_DOUBLE +#define LUA_NUMBER double + +/* +@@ LUAI_UACNUMBER is the result of an 'usual argument conversion' +@* over a number. +*/ +#define LUAI_UACNUMBER double + + +/* +@@ LUA_NUMBER_SCAN is the format for reading numbers. +@@ LUA_NUMBER_FMT is the format for writing numbers. +@@ lua_number2str converts a number to a string. +@@ LUAI_MAXNUMBER2STR is maximum size of previous conversion. +@@ lua_str2number converts a string to a number. +*/ +#define LUA_NUMBER_SCAN "%lf" +#define LUA_NUMBER_FMT "%.14g" +#define lua_number2str(s,n) sprintf((s), LUA_NUMBER_FMT, (n)) +#define LUAI_MAXNUMBER2STR 32 /* 16 digits, sign, point, and \0 */ +#define lua_str2number(s,p) strtod((s), (p)) + + +/* +@@ The luai_num* macros define the primitive operations over numbers. +*/ +#if defined(LUA_CORE) +#include +#define luai_numadd(a,b) ((a)+(b)) +#define luai_numsub(a,b) ((a)-(b)) +#define luai_nummul(a,b) ((a)*(b)) +#define luai_numdiv(a,b) ((a)/(b)) +#define luai_nummod(a,b) ((a) - floor((a)/(b))*(b)) +#define luai_numpow(a,b) (pow(a,b)) +#define luai_numunm(a) (-(a)) +#define luai_numeq(a,b) ((a)==(b)) +#define luai_numlt(a,b) ((a)<(b)) +#define luai_numle(a,b) ((a)<=(b)) +#define luai_numisnan(a) (!luai_numeq((a), (a))) +#endif + + +/* +@@ lua_number2int is a macro to convert lua_Number to int. +@@ lua_number2integer is a macro to convert lua_Number to lua_Integer. +** CHANGE them if you know a faster way to convert a lua_Number to +** int (with any rounding method and without throwing errors) in your +** system. In Pentium machines, a naive typecast from double to int +** in C is extremely slow, so any alternative is worth trying. 
+*/ + +/* On a Pentium, resort to a trick */ +#if defined(LUA_NUMBER_DOUBLE) && !defined(LUA_ANSI) && !defined(__SSE2__) && \ + (defined(__i386) || defined (_M_IX86) || defined(__i386__)) +union luai_Cast { double l_d; long l_l; }; +#define lua_number2int(i,d) \ + { volatile union luai_Cast u; u.l_d = (d) + 6755399441055744.0; (i) = u.l_l; } +#define lua_number2integer(i,n) lua_number2int(i, n) + +/* this option always works, but may be slow */ +#else +#define lua_number2int(i,d) ((i)=(int)(d)) +#define lua_number2integer(i,d) ((i)=(lua_Integer)(d)) + +#endif + +/* }================================================================== */ + + +/* +@@ LUAI_USER_ALIGNMENT_T is a type that requires maximum alignment. +** CHANGE it if your system requires alignments larger than double. (For +** instance, if your system supports long doubles and they must be +** aligned in 16-byte boundaries, then you should add long double in the +** union.) Probably you do not need to change this. +*/ +#define LUAI_USER_ALIGNMENT_T union { double u; void *s; long l; } + + +/* +@@ LUAI_THROW/LUAI_TRY define how Lua does exception handling. +** CHANGE them if you prefer to use longjmp/setjmp even with C++ +** or if want/don't to use _longjmp/_setjmp instead of regular +** longjmp/setjmp. By default, Lua handles errors with exceptions when +** compiling as C++ code, with _longjmp/_setjmp when asked to use them, +** and with longjmp/setjmp otherwise. +*/ +#if defined(__cplusplus) +/* C++ exceptions */ +#define LUAI_THROW(L,c) throw(c) +#define LUAI_TRY(L,c,a) try { a } catch(...) \ + { if ((c)->status == 0) (c)->status = -1; } +#define luai_jmpbuf int /* dummy variable */ + +#elif defined(LUA_USE_ULONGJMP) +/* in Unix, try _longjmp/_setjmp (more efficient) */ +#define LUAI_THROW(L,c) _longjmp((c)->b, 1) +#define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a } +#define luai_jmpbuf jmp_buf + +#else +/* default handling with long jumps */ +#define LUAI_THROW(L,c) longjmp((c)->b, 1) +#define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a } +#define luai_jmpbuf jmp_buf + +#endif + + +/* +@@ LUA_MAXCAPTURES is the maximum number of captures that a pattern +@* can do during pattern-matching. +** CHANGE it if you need more captures. This limit is arbitrary. +*/ +#define LUA_MAXCAPTURES 32 + + +/* +@@ lua_tmpnam is the function that the OS library uses to create a +@* temporary name. +@@ LUA_TMPNAMBUFSIZE is the maximum size of a name created by lua_tmpnam. +** CHANGE them if you have an alternative to tmpnam (which is considered +** insecure) or if you want the original tmpnam anyway. By default, Lua +** uses tmpnam except when POSIX is available, where it uses mkstemp. +*/ +#if defined(loslib_c) || defined(luaall_c) + +#if defined(LUA_USE_MKSTEMP) +#include +#define LUA_TMPNAMBUFSIZE 32 +#define lua_tmpnam(b,e) { \ + strcpy(b, "/tmp/lua_XXXXXX"); \ + e = mkstemp(b); \ + if (e != -1) close(e); \ + e = (e == -1); } + +#else +#define LUA_TMPNAMBUFSIZE L_tmpnam +#define lua_tmpnam(b,e) { e = (tmpnam(b) == NULL); } +#endif + +#endif + + +/* +@@ lua_popen spawns a new process connected to the current one through +@* the file streams. +** CHANGE it if you have a way to implement it in your system. 
+*/ +#if defined(LUA_USE_POPEN) + +#define lua_popen(L,c,m) ((void)L, popen(c,m)) +#define lua_pclose(L,file) ((void)L, (pclose(file) != -1)) + +#elif defined(LUA_WIN) + +#define lua_popen(L,c,m) ((void)L, _popen(c,m)) +#define lua_pclose(L,file) ((void)L, (_pclose(file) != -1)) + +#else + +#define lua_popen(L,c,m) ((void)((void)c, m), \ + luaL_error(L, LUA_QL("popen") " not supported"), (FILE*)0) +#define lua_pclose(L,file) ((void)((void)L, file), 0) + +#endif + +/* +@@ LUA_DL_* define which dynamic-library system Lua should use. +** CHANGE here if Lua has problems choosing the appropriate +** dynamic-library system for your platform (either Windows' DLL, Mac's +** dyld, or Unix's dlopen). If your system is some kind of Unix, there +** is a good chance that it has dlopen, so LUA_DL_DLOPEN will work for +** it. To use dlopen you also need to adapt the src/Makefile (probably +** adding -ldl to the linker options), so Lua does not select it +** automatically. (When you change the makefile to add -ldl, you must +** also add -DLUA_USE_DLOPEN.) +** If you do not want any kind of dynamic library, undefine all these +** options. +** By default, _WIN32 gets LUA_DL_DLL and MAC OS X gets LUA_DL_DYLD. +*/ +#if defined(LUA_USE_DLOPEN) +#define LUA_DL_DLOPEN +#endif + +#if defined(LUA_WIN) +#define LUA_DL_DLL +#endif + + +/* +@@ LUAI_EXTRASPACE allows you to add user-specific data in a lua_State +@* (the data goes just *before* the lua_State pointer). +** CHANGE (define) this if you really need that. This value must be +** a multiple of the maximum alignment required for your machine. +*/ +#define LUAI_EXTRASPACE 0 + + +/* +@@ luai_userstate* allow user-specific actions on threads. +** CHANGE them if you defined LUAI_EXTRASPACE and need to do something +** extra when a thread is created/deleted/resumed/yielded. +*/ +#define luai_userstateopen(L) ((void)L) +#define luai_userstateclose(L) ((void)L) +#define luai_userstatethread(L,L1) ((void)L) +#define luai_userstatefree(L) ((void)L) +#define luai_userstateresume(L,n) ((void)L) +#define luai_userstateyield(L,n) ((void)L) + + +/* +@@ LUA_INTFRMLEN is the length modifier for integer conversions +@* in 'string.format'. +@@ LUA_INTFRM_T is the integer type correspoding to the previous length +@* modifier. +** CHANGE them if your system supports long long or does not support long. +*/ + +#if defined(LUA_USELONGLONG) + +#define LUA_INTFRMLEN "ll" +#define LUA_INTFRM_T long long + +#else + +#define LUA_INTFRMLEN "l" +#define LUA_INTFRM_T long + +#endif + + + +/* =================================================================== */ + +/* +** Local configuration. You can use this space to add your redefinitions +** without modifying the main part of the file. 
+*/ + + + +#endif + diff --git a/deps/lua/src/lualib.h b/deps/lua/src/lualib.h new file mode 100644 index 0000000000000000000000000000000000000000..0c76232c0dd03737542483119723ebc2ae779c57 --- /dev/null +++ b/deps/lua/src/lualib.h @@ -0,0 +1,53 @@ +/* +** $Id: lualib.h,v 1.36 2005/12/27 17:12:00 roberto Exp $ +** Lua standard libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lualib_h +#define lualib_h + +#include "lua.h" + + +/* Key to file-handle type */ +#define LUA_FILEHANDLE "FILE*" + + +#define LUA_COLIBNAME "coroutine" +LUALIB_API int (luaopen_base) (lua_State *L); + +#define LUA_TABLIBNAME "table" +LUALIB_API int (luaopen_table) (lua_State *L); + +#define LUA_IOLIBNAME "io" +LUALIB_API int (luaopen_io) (lua_State *L); + +#define LUA_OSLIBNAME "os" +LUALIB_API int (luaopen_os) (lua_State *L); + +#define LUA_STRLIBNAME "string" +LUALIB_API int (luaopen_string) (lua_State *L); + +#define LUA_MATHLIBNAME "math" +LUALIB_API int (luaopen_math) (lua_State *L); + +#define LUA_DBLIBNAME "debug" +LUALIB_API int (luaopen_debug) (lua_State *L); + +#define LUA_LOADLIBNAME "package" +LUALIB_API int (luaopen_package) (lua_State *L); + + +/* open all previous libraries */ +LUALIB_API void (luaL_openlibs) (lua_State *L); + + + +#ifndef lua_assert +#define lua_assert(x) ((void)0) +#endif + + +#endif diff --git a/deps/lua/src/lundump.c b/deps/lua/src/lundump.c new file mode 100644 index 0000000000000000000000000000000000000000..7fc635eeb7bebb9ba08ca0151ce24bf0c0acb910 --- /dev/null +++ b/deps/lua/src/lundump.c @@ -0,0 +1,223 @@ +/* +** $Id: lundump.c,v 1.60 2006/02/16 15:53:49 lhf Exp $ +** load precompiled Lua chunks +** See Copyright Notice in lua.h +*/ + +#include + +#define lundump_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lmem.h" +#include "lobject.h" +#include "lstring.h" +#include "lundump.h" +#include "lzio.h" + +typedef struct { + lua_State* L; + ZIO* Z; + Mbuffer* b; + const char* name; +} LoadState; + +#ifdef LUAC_TRUST_BINARIES +#define IF(c,s) +#else +#define IF(c,s) if (c) error(S,s) + +static void error(LoadState* S, const char* why) +{ + luaO_pushfstring(S->L,"%s: %s in precompiled chunk",S->name,why); + luaD_throw(S->L,LUA_ERRSYNTAX); +} +#endif + +#define LoadMem(S,b,n,size) LoadBlock(S,b,(n)*(size)) +#define LoadByte(S) (lu_byte)LoadChar(S) +#define LoadVar(S,x) LoadMem(S,&x,1,sizeof(x)) +#define LoadVector(S,b,n,size) LoadMem(S,b,n,size) + +static void LoadBlock(LoadState* S, void* b, size_t size) +{ + size_t r=luaZ_read(S->Z,b,size); + IF (r!=0, "unexpected end"); +} + +static int LoadChar(LoadState* S) +{ + char x; + LoadVar(S,x); + return x; +} + +static int LoadInt(LoadState* S) +{ + int x; + LoadVar(S,x); + IF (x<0, "bad integer"); + return x; +} + +static lua_Number LoadNumber(LoadState* S) +{ + lua_Number x; + LoadVar(S,x); + return x; +} + +static TString* LoadString(LoadState* S) +{ + size_t size; + LoadVar(S,size); + if (size==0) + return NULL; + else + { + char* s=luaZ_openspace(S->L,S->b,size); + LoadBlock(S,s,size); + return luaS_newlstr(S->L,s,size-1); /* remove trailing '\0' */ + } +} + +static void LoadCode(LoadState* S, Proto* f) +{ + int n=LoadInt(S); + f->code=luaM_newvector(S->L,n,Instruction); + f->sizecode=n; + LoadVector(S,f->code,n,sizeof(Instruction)); +} + +static Proto* LoadFunction(LoadState* S, TString* p); + +static void LoadConstants(LoadState* S, Proto* f) +{ + int i,n; + n=LoadInt(S); + f->k=luaM_newvector(S->L,n,TValue); + f->sizek=n; + for (i=0; ik[i]); + for (i=0; 
i<n; i++)
+ {
+  TValue* o=&f->k[i];
+  int t=LoadChar(S);
+  switch (t)
+  {
+   case LUA_TNIL:
+	setnilvalue(o);
+	break;
+   case LUA_TBOOLEAN:
+	setbvalue(o,LoadChar(S));
+	break;
+   case LUA_TNUMBER:
+	setnvalue(o,LoadNumber(S));
+	break;
+   case LUA_TSTRING:
+	setsvalue2n(S->L,o,LoadString(S));
+	break;
+   default:
+	IF (1, "bad constant");
+	break;
+  }
+ }
+ n=LoadInt(S);
+ f->p=luaM_newvector(S->L,n,Proto*);
+ f->sizep=n;
+ for (i=0; i<n; i++) f->p[i]=NULL;
+ for (i=0; i<n; i++) f->p[i]=LoadFunction(S,f->source);
+}
+
+static void LoadDebug(LoadState* S, Proto* f)
+{
+ int i,n;
+ n=LoadInt(S);
+ f->lineinfo=luaM_newvector(S->L,n,int);
+ f->sizelineinfo=n;
+ LoadVector(S,f->lineinfo,n,sizeof(int));
+ n=LoadInt(S);
+ f->locvars=luaM_newvector(S->L,n,LocVar);
+ f->sizelocvars=n;
+ for (i=0; i<n; i++) f->locvars[i].varname=NULL;
+ for (i=0; i<n; i++)
+ {
+  f->locvars[i].varname=LoadString(S);
+  f->locvars[i].startpc=LoadInt(S);
+  f->locvars[i].endpc=LoadInt(S);
+ }
+ n=LoadInt(S);
+ f->upvalues=luaM_newvector(S->L,n,TString*);
+ f->sizeupvalues=n;
+ for (i=0; i<n; i++) f->upvalues[i]=NULL;
+ for (i=0; i<n; i++) f->upvalues[i]=LoadString(S);
+}
+
+static Proto* LoadFunction(LoadState* S, TString* p)
+{
+ Proto* f=luaF_newproto(S->L);
+ setptvalue2s(S->L,S->L->top,f); incr_top(S->L);
+ f->source=LoadString(S); if (f->source==NULL) f->source=p;
+ f->linedefined=LoadInt(S);
+ f->lastlinedefined=LoadInt(S);
+ f->nups=LoadByte(S);
+ f->numparams=LoadByte(S);
+ f->is_vararg=LoadByte(S);
+ f->maxstacksize=LoadByte(S);
+ LoadCode(S,f);
+ LoadConstants(S,f);
+ LoadDebug(S,f);
+ IF (!luaG_checkcode(f), "bad code");
+ S->L->top--;
+ return f;
+}
+
+static void LoadHeader(LoadState* S)
+{
+ char h[LUAC_HEADERSIZE];
+ char s[LUAC_HEADERSIZE];
+ luaU_header(h);
+ LoadBlock(S,s,LUAC_HEADERSIZE);
+ IF (memcmp(h,s,LUAC_HEADERSIZE)!=0, "bad header");
+}
+
+/*
+** load precompiled chunk
+*/
+Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name)
+{
+ LoadState S;
+ if (*name=='@' || *name=='=')
+  S.name=name+1;
+ else if (*name==LUA_SIGNATURE[0])
+  S.name="binary string";
+ else
+  S.name=name;
+ S.L=L;
+ S.Z=Z;
+ S.b=buff;
+ LoadHeader(&S);
+ return LoadFunction(&S,luaS_newliteral(L,"=?"));
+}
+
+/*
+* make header
+*/
+void luaU_header (char* h)
+{
+ int x=1;
+ memcpy(h,LUA_SIGNATURE,sizeof(LUA_SIGNATURE)-1);
+ h+=sizeof(LUA_SIGNATURE)-1;
+ *h++=(char)LUAC_VERSION;
+ *h++=(char)LUAC_FORMAT;
+ *h++=(char)*(char*)&x;				/* endianness */
+ *h++=(char)sizeof(int);
+ *h++=(char)sizeof(size_t);
+ *h++=(char)sizeof(Instruction);
+ *h++=(char)sizeof(lua_Number);
+ *h++=(char)(((lua_Number)0.5)==0);		/* is lua_Number integral? 
*/ +} diff --git a/deps/lua/src/lundump.h b/deps/lua/src/lundump.h new file mode 100644 index 0000000000000000000000000000000000000000..58cca5d19083a235e835566f1867104f011aa70d --- /dev/null +++ b/deps/lua/src/lundump.h @@ -0,0 +1,36 @@ +/* +** $Id: lundump.h,v 1.40 2005/11/11 14:03:13 lhf Exp $ +** load precompiled Lua chunks +** See Copyright Notice in lua.h +*/ + +#ifndef lundump_h +#define lundump_h + +#include "lobject.h" +#include "lzio.h" + +/* load one chunk; from lundump.c */ +LUAI_FUNC Proto* luaU_undump (lua_State* L, ZIO* Z, Mbuffer* buff, const char* name); + +/* make header; from lundump.c */ +LUAI_FUNC void luaU_header (char* h); + +/* dump one chunk; from ldump.c */ +LUAI_FUNC int luaU_dump (lua_State* L, const Proto* f, lua_Writer w, void* data, int strip); + +#ifdef luac_c +/* print one chunk; from print.c */ +LUAI_FUNC void luaU_print (const Proto* f, int full); +#endif + +/* for header of binary files -- this is Lua 5.1 */ +#define LUAC_VERSION 0x51 + +/* for header of binary files -- this is the official format */ +#define LUAC_FORMAT 0 + +/* size of header of binary files */ +#define LUAC_HEADERSIZE 12 + +#endif diff --git a/deps/lua/src/lvm.c b/deps/lua/src/lvm.c new file mode 100644 index 0000000000000000000000000000000000000000..6f4c0291c970a0c49085797ef2e87565fada2cd3 --- /dev/null +++ b/deps/lua/src/lvm.c @@ -0,0 +1,762 @@ +/* +** $Id: lvm.c,v 2.62 2006/01/23 19:51:43 roberto Exp $ +** Lua virtual machine +** See Copyright Notice in lua.h +*/ + + +#include +#include +#include + +#define lvm_c +#define LUA_CORE + +#include "lua.h" + +#include "ldebug.h" +#include "ldo.h" +#include "lfunc.h" +#include "lgc.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lstate.h" +#include "lstring.h" +#include "ltable.h" +#include "ltm.h" +#include "lvm.h" + + + +/* limit for table tag-method chains (to avoid loops) */ +#define MAXTAGLOOP 100 + + +const TValue *luaV_tonumber (const TValue *obj, TValue *n) { + lua_Number num; + if (ttisnumber(obj)) return obj; + if (ttisstring(obj) && luaO_str2d(svalue(obj), &num)) { + setnvalue(n, num); + return n; + } + else + return NULL; +} + + +int luaV_tostring (lua_State *L, StkId obj) { + if (!ttisnumber(obj)) + return 0; + else { + char s[LUAI_MAXNUMBER2STR]; + lua_Number n = nvalue(obj); + lua_number2str(s, n); + setsvalue2s(L, obj, luaS_new(L, s)); + return 1; + } +} + + +static void traceexec (lua_State *L, const Instruction *pc) { + lu_byte mask = L->hookmask; + const Instruction *oldpc = L->savedpc; + L->savedpc = pc; + if (mask > LUA_MASKLINE) { /* instruction-hook set? 
*/ + if (L->hookcount == 0) { + resethookcount(L); + luaD_callhook(L, LUA_HOOKCOUNT, -1); + } + } + if (mask & LUA_MASKLINE) { + Proto *p = ci_func(L->ci)->l.p; + int npc = pcRel(pc, p); + int newline = getline(p, npc); + /* call linehook when enter a new function, when jump back (loop), + or when enter a new line */ + if (npc == 0 || pc <= oldpc || newline != getline(p, pcRel(oldpc, p))) + luaD_callhook(L, LUA_HOOKLINE, newline); + } +} + + +static void callTMres (lua_State *L, StkId res, const TValue *f, + const TValue *p1, const TValue *p2) { + ptrdiff_t result = savestack(L, res); + setobj2s(L, L->top, f); /* push function */ + setobj2s(L, L->top+1, p1); /* 1st argument */ + setobj2s(L, L->top+2, p2); /* 2nd argument */ + luaD_checkstack(L, 3); + L->top += 3; + luaD_call(L, L->top - 3, 1); + res = restorestack(L, result); + L->top--; + setobjs2s(L, res, L->top); +} + + + +static void callTM (lua_State *L, const TValue *f, const TValue *p1, + const TValue *p2, const TValue *p3) { + setobj2s(L, L->top, f); /* push function */ + setobj2s(L, L->top+1, p1); /* 1st argument */ + setobj2s(L, L->top+2, p2); /* 2nd argument */ + setobj2s(L, L->top+3, p3); /* 3th argument */ + luaD_checkstack(L, 4); + L->top += 4; + luaD_call(L, L->top - 4, 0); +} + + +void luaV_gettable (lua_State *L, const TValue *t, TValue *key, StkId val) { + int loop; + for (loop = 0; loop < MAXTAGLOOP; loop++) { + const TValue *tm; + if (ttistable(t)) { /* `t' is a table? */ + Table *h = hvalue(t); + const TValue *res = luaH_get(h, key); /* do a primitive get */ + if (!ttisnil(res) || /* result is no nil? */ + (tm = fasttm(L, h->metatable, TM_INDEX)) == NULL) { /* or no TM? */ + setobj2s(L, val, res); + return; + } + /* else will try the tag method */ + } + else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_INDEX))) + luaG_typeerror(L, t, "index"); + if (ttisfunction(tm)) { + callTMres(L, val, tm, t, key); + return; + } + t = tm; /* else repeat with `tm' */ + } + luaG_runerror(L, "loop in gettable"); +} + + +void luaV_settable (lua_State *L, const TValue *t, TValue *key, StkId val) { + int loop; + for (loop = 0; loop < MAXTAGLOOP; loop++) { + const TValue *tm; + if (ttistable(t)) { /* `t' is a table? */ + Table *h = hvalue(t); + TValue *oldval = luaH_set(L, h, key); /* do a primitive set */ + if (!ttisnil(oldval) || /* result is no nil? */ + (tm = fasttm(L, h->metatable, TM_NEWINDEX)) == NULL) { /* or no TM? */ + setobj2t(L, oldval, val); + luaC_barriert(L, h, val); + return; + } + /* else will try the tag method */ + } + else if (ttisnil(tm = luaT_gettmbyobj(L, t, TM_NEWINDEX))) + luaG_typeerror(L, t, "index"); + if (ttisfunction(tm)) { + callTM(L, tm, t, key, val); + return; + } + t = tm; /* else repeat with `tm' */ + } + luaG_runerror(L, "loop in settable"); +} + + +static int call_binTM (lua_State *L, const TValue *p1, const TValue *p2, + StkId res, TMS event) { + const TValue *tm = luaT_gettmbyobj(L, p1, event); /* try first operand */ + if (ttisnil(tm)) + tm = luaT_gettmbyobj(L, p2, event); /* try second operand */ + if (!ttisfunction(tm)) return 0; + callTMres(L, res, tm, p1, p2); + return 1; +} + + +static const TValue *get_compTM (lua_State *L, Table *mt1, Table *mt2, + TMS event) { + const TValue *tm1 = fasttm(L, mt1, event); + const TValue *tm2; + if (tm1 == NULL) return NULL; /* no metamethod */ + if (mt1 == mt2) return tm1; /* same metatables => same metamethods */ + tm2 = fasttm(L, mt2, event); + if (tm2 == NULL) return NULL; /* no metamethod */ + if (luaO_rawequalObj(tm1, tm2)) /* same metamethods? 
*/ + return tm1; + return NULL; +} + + +static int call_orderTM (lua_State *L, const TValue *p1, const TValue *p2, + TMS event) { + const TValue *tm1 = luaT_gettmbyobj(L, p1, event); + const TValue *tm2; + if (ttisnil(tm1)) return -1; /* no metamethod? */ + tm2 = luaT_gettmbyobj(L, p2, event); + if (!luaO_rawequalObj(tm1, tm2)) /* different metamethods? */ + return -1; + callTMres(L, L->top, tm1, p1, p2); + return !l_isfalse(L->top); +} + + +static int l_strcmp (const TString *ls, const TString *rs) { + const char *l = getstr(ls); + size_t ll = ls->tsv.len; + const char *r = getstr(rs); + size_t lr = rs->tsv.len; + for (;;) { + int temp = strcoll(l, r); + if (temp != 0) return temp; + else { /* strings are equal up to a `\0' */ + size_t len = strlen(l); /* index of first `\0' in both strings */ + if (len == lr) /* r is finished? */ + return (len == ll) ? 0 : 1; + else if (len == ll) /* l is finished? */ + return -1; /* l is smaller than r (because r is not finished) */ + /* both strings longer than `len'; go on comparing (after the `\0') */ + len++; + l += len; ll -= len; r += len; lr -= len; + } + } +} + + +int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r) { + int res; + if (ttype(l) != ttype(r)) + return luaG_ordererror(L, l, r); + else if (ttisnumber(l)) + return luai_numlt(nvalue(l), nvalue(r)); + else if (ttisstring(l)) + return l_strcmp(rawtsvalue(l), rawtsvalue(r)) < 0; + else if ((res = call_orderTM(L, l, r, TM_LT)) != -1) + return res; + return luaG_ordererror(L, l, r); +} + + +static int lessequal (lua_State *L, const TValue *l, const TValue *r) { + int res; + if (ttype(l) != ttype(r)) + return luaG_ordererror(L, l, r); + else if (ttisnumber(l)) + return luai_numle(nvalue(l), nvalue(r)); + else if (ttisstring(l)) + return l_strcmp(rawtsvalue(l), rawtsvalue(r)) <= 0; + else if ((res = call_orderTM(L, l, r, TM_LE)) != -1) /* first try `le' */ + return res; + else if ((res = call_orderTM(L, r, l, TM_LT)) != -1) /* else try `lt' */ + return !res; + return luaG_ordererror(L, l, r); +} + + +int luaV_equalval (lua_State *L, const TValue *t1, const TValue *t2) { + const TValue *tm; + lua_assert(ttype(t1) == ttype(t2)); + switch (ttype(t1)) { + case LUA_TNIL: return 1; + case LUA_TNUMBER: return luai_numeq(nvalue(t1), nvalue(t2)); + case LUA_TBOOLEAN: return bvalue(t1) == bvalue(t2); /* true must be 1 !! */ + case LUA_TLIGHTUSERDATA: return pvalue(t1) == pvalue(t2); + case LUA_TUSERDATA: { + if (uvalue(t1) == uvalue(t2)) return 1; + tm = get_compTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable, + TM_EQ); + break; /* will try TM */ + } + case LUA_TTABLE: { + if (hvalue(t1) == hvalue(t2)) return 1; + tm = get_compTM(L, hvalue(t1)->metatable, hvalue(t2)->metatable, TM_EQ); + break; /* will try TM */ + } + default: return gcvalue(t1) == gcvalue(t2); + } + if (tm == NULL) return 0; /* no TM? 
*/ + callTMres(L, L->top, tm, t1, t2); /* call TM */ + return !l_isfalse(L->top); +} + + +void luaV_concat (lua_State *L, int total, int last) { + do { + StkId top = L->base + last + 1; + int n = 2; /* number of elements handled in this pass (at least 2) */ + if (!tostring(L, top-2) || !tostring(L, top-1)) { + if (!call_binTM(L, top-2, top-1, top-2, TM_CONCAT)) + luaG_concaterror(L, top-2, top-1); + } else if (tsvalue(top-1)->len > 0) { /* if len=0, do nothing */ + /* at least two string values; get as many as possible */ + size_t tl = tsvalue(top-1)->len; + char *buffer; + int i; + /* collect total length */ + for (n = 1; n < total && tostring(L, top-n-1); n++) { + size_t l = tsvalue(top-n-1)->len; + if (l >= MAX_SIZET - tl) luaG_runerror(L, "string length overflow"); + tl += l; + } + buffer = luaZ_openspace(L, &G(L)->buff, tl); + tl = 0; + for (i=n; i>0; i--) { /* concat all strings */ + size_t l = tsvalue(top-i)->len; + memcpy(buffer+tl, svalue(top-i), l); + tl += l; + } + setsvalue2s(L, top-n, luaS_newlstr(L, buffer, tl)); + } + total -= n-1; /* got `n' strings to create 1 new */ + last -= n-1; + } while (total > 1); /* repeat until only 1 result left */ +} + + +static void Arith (lua_State *L, StkId ra, const TValue *rb, + const TValue *rc, TMS op) { + TValue tempb, tempc; + const TValue *b, *c; + if ((b = luaV_tonumber(rb, &tempb)) != NULL && + (c = luaV_tonumber(rc, &tempc)) != NULL) { + lua_Number nb = nvalue(b), nc = nvalue(c); + switch (op) { + case TM_ADD: setnvalue(ra, luai_numadd(nb, nc)); break; + case TM_SUB: setnvalue(ra, luai_numsub(nb, nc)); break; + case TM_MUL: setnvalue(ra, luai_nummul(nb, nc)); break; + case TM_DIV: setnvalue(ra, luai_numdiv(nb, nc)); break; + case TM_MOD: setnvalue(ra, luai_nummod(nb, nc)); break; + case TM_POW: setnvalue(ra, luai_numpow(nb, nc)); break; + case TM_UNM: setnvalue(ra, luai_numunm(nb)); break; + default: lua_assert(0); break; + } + } + else if (!call_binTM(L, rb, rc, ra, op)) + luaG_aritherror(L, rb, rc); +} + + + +/* +** some macros for common tasks in `luaV_execute' +*/ + +#define runtime_check(L, c) { if (!(c)) break; } + +#define RA(i) (base+GETARG_A(i)) +/* to be used after possible stack reallocation */ +#define RB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgR, base+GETARG_B(i)) +#define RC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgR, base+GETARG_C(i)) +#define RKB(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, \ + ISK(GETARG_B(i)) ? k+INDEXK(GETARG_B(i)) : base+GETARG_B(i)) +#define RKC(i) check_exp(getCMode(GET_OPCODE(i)) == OpArgK, \ + ISK(GETARG_C(i)) ? 
k+INDEXK(GETARG_C(i)) : base+GETARG_C(i)) +#define KBx(i) check_exp(getBMode(GET_OPCODE(i)) == OpArgK, k+GETARG_Bx(i)) + + +#define dojump(L,pc,i) {(pc) += (i); luai_threadyield(L);} + + +#define Protect(x) { L->savedpc = pc; {x;}; base = L->base; } + + +#define arith_op(op,tm) { \ + TValue *rb = RKB(i); \ + TValue *rc = RKC(i); \ + if (ttisnumber(rb) && ttisnumber(rc)) { \ + lua_Number nb = nvalue(rb), nc = nvalue(rc); \ + setnvalue(ra, op(nb, nc)); \ + } \ + else \ + Protect(Arith(L, ra, rb, rc, tm)); \ + } + + + +void luaV_execute (lua_State *L, int nexeccalls) { + LClosure *cl; + StkId base; + TValue *k; + const Instruction *pc; + reentry: /* entry point */ + pc = L->savedpc; + cl = &clvalue(L->ci->func)->l; + base = L->base; + k = cl->p->k; + /* main loop of interpreter */ + for (;;) { + const Instruction i = *pc++; + StkId ra; + if ((L->hookmask & (LUA_MASKLINE | LUA_MASKCOUNT)) && + (--L->hookcount == 0 || L->hookmask & LUA_MASKLINE)) { + traceexec(L, pc); + if (L->status == LUA_YIELD) { /* did hook yield? */ + L->savedpc = pc - 1; + return; + } + base = L->base; + } + /* warning!! several calls may realloc the stack and invalidate `ra' */ + ra = RA(i); + lua_assert(base == L->base && L->base == L->ci->base); + lua_assert(base <= L->top && L->top <= L->stack + L->stacksize); + lua_assert(L->top == L->ci->top || luaG_checkopenop(i)); + switch (GET_OPCODE(i)) { + case OP_MOVE: { + setobjs2s(L, ra, RB(i)); + continue; + } + case OP_LOADK: { + setobj2s(L, ra, KBx(i)); + continue; + } + case OP_LOADBOOL: { + setbvalue(ra, GETARG_B(i)); + if (GETARG_C(i)) pc++; /* skip next instruction (if C) */ + continue; + } + case OP_LOADNIL: { + TValue *rb = RB(i); + do { + setnilvalue(rb--); + } while (rb >= ra); + continue; + } + case OP_GETUPVAL: { + int b = GETARG_B(i); + setobj2s(L, ra, cl->upvals[b]->v); + continue; + } + case OP_GETGLOBAL: { + TValue g; + TValue *rb = KBx(i); + sethvalue(L, &g, cl->env); + lua_assert(ttisstring(rb)); + Protect(luaV_gettable(L, &g, rb, ra)); + continue; + } + case OP_GETTABLE: { + Protect(luaV_gettable(L, RB(i), RKC(i), ra)); + continue; + } + case OP_SETGLOBAL: { + TValue g; + sethvalue(L, &g, cl->env); + lua_assert(ttisstring(KBx(i))); + Protect(luaV_settable(L, &g, KBx(i), ra)); + continue; + } + case OP_SETUPVAL: { + UpVal *uv = cl->upvals[GETARG_B(i)]; + setobj(L, uv->v, ra); + luaC_barrier(L, uv, ra); + continue; + } + case OP_SETTABLE: { + Protect(luaV_settable(L, ra, RKB(i), RKC(i))); + continue; + } + case OP_NEWTABLE: { + int b = GETARG_B(i); + int c = GETARG_C(i); + sethvalue(L, ra, luaH_new(L, luaO_fb2int(b), luaO_fb2int(c))); + Protect(luaC_checkGC(L)); + continue; + } + case OP_SELF: { + StkId rb = RB(i); + setobjs2s(L, ra+1, rb); + Protect(luaV_gettable(L, rb, RKC(i), ra)); + continue; + } + case OP_ADD: { + arith_op(luai_numadd, TM_ADD); + continue; + } + case OP_SUB: { + arith_op(luai_numsub, TM_SUB); + continue; + } + case OP_MUL: { + arith_op(luai_nummul, TM_MUL); + continue; + } + case OP_DIV: { + arith_op(luai_numdiv, TM_DIV); + continue; + } + case OP_MOD: { + arith_op(luai_nummod, TM_MOD); + continue; + } + case OP_POW: { + arith_op(luai_numpow, TM_POW); + continue; + } + case OP_UNM: { + TValue *rb = RB(i); + if (ttisnumber(rb)) { + lua_Number nb = nvalue(rb); + setnvalue(ra, luai_numunm(nb)); + } + else { + Protect(Arith(L, ra, rb, rb, TM_UNM)); + } + continue; + } + case OP_NOT: { + int res = l_isfalse(RB(i)); /* next assignment may change this value */ + setbvalue(ra, res); + continue; + } + case OP_LEN: { + const TValue *rb = RB(i); 
+ switch (ttype(rb)) { + case LUA_TTABLE: { + setnvalue(ra, cast_num(luaH_getn(hvalue(rb)))); + break; + } + case LUA_TSTRING: { + setnvalue(ra, cast_num(tsvalue(rb)->len)); + break; + } + default: { /* try metamethod */ + Protect( + if (!call_binTM(L, rb, luaO_nilobject, ra, TM_LEN)) + luaG_typeerror(L, rb, "get length of"); + ) + } + } + continue; + } + case OP_CONCAT: { + int b = GETARG_B(i); + int c = GETARG_C(i); + Protect(luaV_concat(L, c-b+1, c); luaC_checkGC(L)); + setobjs2s(L, RA(i), base+b); + continue; + } + case OP_JMP: { + dojump(L, pc, GETARG_sBx(i)); + continue; + } + case OP_EQ: { + TValue *rb = RKB(i); + TValue *rc = RKC(i); + Protect( + if (equalobj(L, rb, rc) == GETARG_A(i)) + dojump(L, pc, GETARG_sBx(*pc)); + ) + pc++; + continue; + } + case OP_LT: { + Protect( + if (luaV_lessthan(L, RKB(i), RKC(i)) == GETARG_A(i)) + dojump(L, pc, GETARG_sBx(*pc)); + ) + pc++; + continue; + } + case OP_LE: { + Protect( + if (lessequal(L, RKB(i), RKC(i)) == GETARG_A(i)) + dojump(L, pc, GETARG_sBx(*pc)); + ) + pc++; + continue; + } + case OP_TEST: { + if (l_isfalse(ra) != GETARG_C(i)) + dojump(L, pc, GETARG_sBx(*pc)); + pc++; + continue; + } + case OP_TESTSET: { + TValue *rb = RB(i); + if (l_isfalse(rb) != GETARG_C(i)) { + setobjs2s(L, ra, rb); + dojump(L, pc, GETARG_sBx(*pc)); + } + pc++; + continue; + } + case OP_CALL: { + int b = GETARG_B(i); + int nresults = GETARG_C(i) - 1; + if (b != 0) L->top = ra+b; /* else previous instruction set top */ + L->savedpc = pc; + switch (luaD_precall(L, ra, nresults)) { + case PCRLUA: { + nexeccalls++; + goto reentry; /* restart luaV_execute over new Lua function */ + } + case PCRC: { + /* it was a C function (`precall' called it); adjust results */ + if (nresults >= 0) L->top = L->ci->top; + base = L->base; + continue; + } + default: { + return; /* yield */ + } + } + } + case OP_TAILCALL: { + int b = GETARG_B(i); + if (b != 0) L->top = ra+b; /* else previous instruction set top */ + L->savedpc = pc; + lua_assert(GETARG_C(i) - 1 == LUA_MULTRET); + switch (luaD_precall(L, ra, LUA_MULTRET)) { + case PCRLUA: { + /* tail call: put new frame in place of previous one */ + CallInfo *ci = L->ci - 1; /* previous frame */ + int aux; + StkId func = ci->func; + StkId pfunc = (ci+1)->func; /* previous function index */ + if (L->openupval) luaF_close(L, ci->base); + L->base = ci->base = ci->func + ((ci+1)->base - pfunc); + for (aux = 0; pfunc+aux < L->top; aux++) /* move frame down */ + setobjs2s(L, func+aux, pfunc+aux); + ci->top = L->top = func+aux; /* correct top */ + lua_assert(L->top == L->base + clvalue(func)->l.p->maxstacksize); + ci->savedpc = L->savedpc; + ci->tailcalls++; /* one more call lost */ + L->ci--; /* remove new frame */ + goto reentry; + } + case PCRC: { /* it was a C function (`precall' called it) */ + base = L->base; + continue; + } + default: { + return; /* yield */ + } + } + } + case OP_RETURN: { + int b = GETARG_B(i); + if (b != 0) L->top = ra+b-1; + if (L->openupval) luaF_close(L, base); + L->savedpc = pc; + b = luaD_poscall(L, ra); + if (--nexeccalls == 0) /* was previous function running `here'? */ + return; /* no: return */ + else { /* yes: continue its execution */ + if (b) L->top = L->ci->top; + lua_assert(isLua(L->ci)); + lua_assert(GET_OPCODE(*((L->ci)->savedpc - 1)) == OP_CALL); + goto reentry; + } + } + case OP_FORLOOP: { + lua_Number step = nvalue(ra+2); + lua_Number idx = luai_numadd(nvalue(ra), step); /* increment index */ + lua_Number limit = nvalue(ra+1); + if (luai_numlt(0, step) ? 
luai_numle(idx, limit) + : luai_numle(limit, idx)) { + dojump(L, pc, GETARG_sBx(i)); /* jump back */ + setnvalue(ra, idx); /* update internal index... */ + setnvalue(ra+3, idx); /* ...and external index */ + } + continue; + } + case OP_FORPREP: { + const TValue *init = ra; + const TValue *plimit = ra+1; + const TValue *pstep = ra+2; + L->savedpc = pc; /* next steps may throw errors */ + if (!tonumber(init, ra)) + luaG_runerror(L, LUA_QL("for") " initial value must be a number"); + else if (!tonumber(plimit, ra+1)) + luaG_runerror(L, LUA_QL("for") " limit must be a number"); + else if (!tonumber(pstep, ra+2)) + luaG_runerror(L, LUA_QL("for") " step must be a number"); + setnvalue(ra, luai_numsub(nvalue(ra), nvalue(pstep))); + dojump(L, pc, GETARG_sBx(i)); + continue; + } + case OP_TFORLOOP: { + StkId cb = ra + 3; /* call base */ + setobjs2s(L, cb+2, ra+2); + setobjs2s(L, cb+1, ra+1); + setobjs2s(L, cb, ra); + L->top = cb+3; /* func. + 2 args (state and index) */ + Protect(luaD_call(L, cb, GETARG_C(i))); + L->top = L->ci->top; + cb = RA(i) + 3; /* previous call may change the stack */ + if (!ttisnil(cb)) { /* continue loop? */ + setobjs2s(L, cb-1, cb); /* save control variable */ + dojump(L, pc, GETARG_sBx(*pc)); /* jump back */ + } + pc++; + continue; + } + case OP_SETLIST: { + int n = GETARG_B(i); + int c = GETARG_C(i); + int last; + Table *h; + if (n == 0) { + n = cast_int(L->top - ra) - 1; + L->top = L->ci->top; + } + if (c == 0) c = cast_int(*pc++); + runtime_check(L, ttistable(ra)); + h = hvalue(ra); + last = ((c-1)*LFIELDS_PER_FLUSH) + n; + if (last > h->sizearray) /* needs more space? */ + luaH_resizearray(L, h, last); /* pre-alloc it at once */ + for (; n > 0; n--) { + TValue *val = ra+n; + setobj2t(L, luaH_setnum(L, h, last--), val); + luaC_barriert(L, h, val); + } + continue; + } + case OP_CLOSE: { + luaF_close(L, ra); + continue; + } + case OP_CLOSURE: { + Proto *p; + Closure *ncl; + int nup, j; + p = cl->p->p[GETARG_Bx(i)]; + nup = p->nups; + ncl = luaF_newLclosure(L, nup, cl->env); + ncl->l.p = p; + for (j=0; jl.upvals[j] = cl->upvals[GETARG_B(*pc)]; + else { + lua_assert(GET_OPCODE(*pc) == OP_MOVE); + ncl->l.upvals[j] = luaF_findupval(L, base + GETARG_B(*pc)); + } + } + setclvalue(L, ra, ncl); + Protect(luaC_checkGC(L)); + continue; + } + case OP_VARARG: { + int b = GETARG_B(i) - 1; + int j; + CallInfo *ci = L->ci; + int n = cast_int(ci->base - ci->func) - cl->p->numparams - 1; + if (b == LUA_MULTRET) { + Protect(luaD_checkstack(L, n)); + ra = RA(i); /* previous call may change the stack */ + b = n; + L->top = ra + n; + } + for (j = 0; j < b; j++) { + if (j < n) { + setobjs2s(L, ra + j, ci->base - n + j); + } + else { + setnilvalue(ra + j); + } + } + continue; + } + } + } +} + diff --git a/deps/lua/src/lvm.h b/deps/lua/src/lvm.h new file mode 100644 index 0000000000000000000000000000000000000000..788423f8e31f8e9bcf30415584faaec768ea767a --- /dev/null +++ b/deps/lua/src/lvm.h @@ -0,0 +1,36 @@ +/* +** $Id: lvm.h,v 2.5 2005/08/22 18:54:49 roberto Exp $ +** Lua virtual machine +** See Copyright Notice in lua.h +*/ + +#ifndef lvm_h +#define lvm_h + + +#include "ldo.h" +#include "lobject.h" +#include "ltm.h" + + +#define tostring(L,o) ((ttype(o) == LUA_TSTRING) || (luaV_tostring(L, o))) + +#define tonumber(o,n) (ttype(o) == LUA_TNUMBER || \ + (((o) = luaV_tonumber(o,n)) != NULL)) + +#define equalobj(L,o1,o2) \ + (ttype(o1) == ttype(o2) && luaV_equalval(L, o1, o2)) + + +LUAI_FUNC int luaV_lessthan (lua_State *L, const TValue *l, const TValue *r); +LUAI_FUNC int luaV_equalval 
(lua_State *L, const TValue *t1, const TValue *t2); +LUAI_FUNC const TValue *luaV_tonumber (const TValue *obj, TValue *n); +LUAI_FUNC int luaV_tostring (lua_State *L, StkId obj); +LUAI_FUNC void luaV_gettable (lua_State *L, const TValue *t, TValue *key, + StkId val); +LUAI_FUNC void luaV_settable (lua_State *L, const TValue *t, TValue *key, + StkId val); +LUAI_FUNC void luaV_execute (lua_State *L, int nexeccalls); +LUAI_FUNC void luaV_concat (lua_State *L, int total, int last); + +#endif diff --git a/deps/lua/src/lzio.c b/deps/lua/src/lzio.c new file mode 100644 index 0000000000000000000000000000000000000000..5121ada8466e75cfe60cf9cde8282c30499892b8 --- /dev/null +++ b/deps/lua/src/lzio.c @@ -0,0 +1,82 @@ +/* +** $Id: lzio.c,v 1.31 2005/06/03 20:15:29 roberto Exp $ +** a generic input stream interface +** See Copyright Notice in lua.h +*/ + + +#include + +#define lzio_c +#define LUA_CORE + +#include "lua.h" + +#include "llimits.h" +#include "lmem.h" +#include "lstate.h" +#include "lzio.h" + + +int luaZ_fill (ZIO *z) { + size_t size; + lua_State *L = z->L; + const char *buff; + lua_unlock(L); + buff = z->reader(L, z->data, &size); + lua_lock(L); + if (buff == NULL || size == 0) return EOZ; + z->n = size - 1; + z->p = buff; + return char2int(*(z->p++)); +} + + +int luaZ_lookahead (ZIO *z) { + if (z->n == 0) { + if (luaZ_fill(z) == EOZ) + return EOZ; + else { + z->n++; /* luaZ_fill removed first byte; put back it */ + z->p--; + } + } + return char2int(*z->p); +} + + +void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader, void *data) { + z->L = L; + z->reader = reader; + z->data = data; + z->n = 0; + z->p = NULL; +} + + +/* --------------------------------------------------------------- read --- */ +size_t luaZ_read (ZIO *z, void *b, size_t n) { + while (n) { + size_t m; + if (luaZ_lookahead(z) == EOZ) + return n; /* return number of missing bytes */ + m = (n <= z->n) ? n : z->n; /* min. between n and z->n */ + memcpy(b, z->p, m); + z->n -= m; + z->p += m; + b = (char *)b + m; + n -= m; + } + return 0; +} + +/* ------------------------------------------------------------------------ */ +char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n) { + if (n > buff->buffsize) { + if (n < LUA_MINBUFFER) n = LUA_MINBUFFER; + luaZ_resizebuffer(L, buff, n); + } + return buff->buffer; +} + + diff --git a/deps/lua/src/lzio.h b/deps/lua/src/lzio.h new file mode 100644 index 0000000000000000000000000000000000000000..8f403b8e74f5e4562328ebfa76f9c0ef4c77266d --- /dev/null +++ b/deps/lua/src/lzio.h @@ -0,0 +1,67 @@ +/* +** $Id: lzio.h,v 1.21 2005/05/17 19:49:15 roberto Exp $ +** Buffered streams +** See Copyright Notice in lua.h +*/ + + +#ifndef lzio_h +#define lzio_h + +#include "lua.h" + +#include "lmem.h" + + +#define EOZ (-1) /* end of stream */ + +typedef struct Zio ZIO; + +#define char2int(c) cast(int, cast(unsigned char, (c))) + +#define zgetc(z) (((z)->n--)>0 ? 
char2int(*(z)->p++) : luaZ_fill(z)) + +typedef struct Mbuffer { + char *buffer; + size_t n; + size_t buffsize; +} Mbuffer; + +#define luaZ_initbuffer(L, buff) ((buff)->buffer = NULL, (buff)->buffsize = 0) + +#define luaZ_buffer(buff) ((buff)->buffer) +#define luaZ_sizebuffer(buff) ((buff)->buffsize) +#define luaZ_bufflen(buff) ((buff)->n) + +#define luaZ_resetbuffer(buff) ((buff)->n = 0) + + +#define luaZ_resizebuffer(L, buff, size) \ + (luaM_reallocvector(L, (buff)->buffer, (buff)->buffsize, size, char), \ + (buff)->buffsize = size) + +#define luaZ_freebuffer(L, buff) luaZ_resizebuffer(L, buff, 0) + + +LUAI_FUNC char *luaZ_openspace (lua_State *L, Mbuffer *buff, size_t n); +LUAI_FUNC void luaZ_init (lua_State *L, ZIO *z, lua_Reader reader, + void *data); +LUAI_FUNC size_t luaZ_read (ZIO* z, void* b, size_t n); /* read next n bytes */ +LUAI_FUNC int luaZ_lookahead (ZIO *z); + + + +/* --------- Private Part ------------------ */ + +struct Zio { + size_t n; /* bytes still unread */ + const char *p; /* current position in buffer */ + lua_Reader reader; + void* data; /* additional data */ + lua_State *L; /* Lua state (for reader) */ +}; + + +LUAI_FUNC int luaZ_fill (ZIO *z); + +#endif diff --git a/deps/lua/src/print.c b/deps/lua/src/print.c new file mode 100644 index 0000000000000000000000000000000000000000..1c3a4457c498c68bf58b3f790aaae76610cafa8c --- /dev/null +++ b/deps/lua/src/print.c @@ -0,0 +1,224 @@ +/* +** $Id: print.c,v 1.54 2006/01/11 22:49:27 lhf Exp $ +** print bytecodes +** See Copyright Notice in lua.h +*/ + +#include +#include + +#define luac_c +#define LUA_CORE + +#include "ldebug.h" +#include "lobject.h" +#include "lopcodes.h" +#include "lundump.h" + +#define PrintFunction luaU_print + +#define Sizeof(x) ((int)sizeof(x)) +#define VOID(p) ((const void*)(p)) + +static void PrintString(const Proto* f, int n) +{ + const char* s=svalue(&f->k[n]); + putchar('"'); + for (; *s; s++) + { + switch (*s) + { + case '"': printf("\\\""); break; + case '\a': printf("\\a"); break; + case '\b': printf("\\b"); break; + case '\f': printf("\\f"); break; + case '\n': printf("\\n"); break; + case '\r': printf("\\r"); break; + case '\t': printf("\\t"); break; + case '\v': printf("\\v"); break; + default: if (isprint((unsigned char)*s)) + printf("%c",*s); + else + printf("\\%03u",(unsigned char)*s); + } + } + putchar('"'); +} + +static void PrintConstant(const Proto* f, int i) +{ + const TValue* o=&f->k[i]; + switch (ttype(o)) + { + case LUA_TNIL: + printf("nil"); + break; + case LUA_TBOOLEAN: + printf(bvalue(o) ? "true" : "false"); + break; + case LUA_TNUMBER: + printf(LUA_NUMBER_FMT,nvalue(o)); + break; + case LUA_TSTRING: + PrintString(f,i); + break; + default: /* cannot happen */ + printf("? type=%d",ttype(o)); + break; + } +} + +static void PrintCode(const Proto* f) +{ + const Instruction* code=f->code; + int pc,n=f->sizecode; + for (pc=0; pc0) printf("[%d]\t",line); else printf("[-]\t"); + printf("%-9s\t",luaP_opnames[o]); + switch (getOpMode(o)) + { + case iABC: + printf("%d",a); + if (getBMode(o)!=OpArgN) printf(" %d",ISK(b) ? (-1-INDEXK(b)) : b); + if (getCMode(o)!=OpArgN) printf(" %d",ISK(c) ? (-1-INDEXK(c)) : c); + break; + case iABx: + if (getBMode(o)==OpArgK) printf("%d %d",a,-1-bx); else printf("%d %d",a,bx); + break; + case iAsBx: + if (o==OP_JMP) printf("%d",sbx); else printf("%d %d",a,sbx); + break; + } + switch (o) + { + case OP_LOADK: + printf("\t; "); PrintConstant(f,bx); + break; + case OP_GETUPVAL: + case OP_SETUPVAL: + printf("\t; %s", (f->sizeupvalues>0) ? 
getstr(f->upvalues[b]) : "-"); + break; + case OP_GETGLOBAL: + case OP_SETGLOBAL: + printf("\t; %s",svalue(&f->k[bx])); + break; + case OP_GETTABLE: + case OP_SELF: + if (ISK(c)) { printf("\t; "); PrintConstant(f,INDEXK(c)); } + break; + case OP_SETTABLE: + case OP_ADD: + case OP_SUB: + case OP_MUL: + case OP_DIV: + case OP_POW: + case OP_EQ: + case OP_LT: + case OP_LE: + if (ISK(b) || ISK(c)) + { + printf("\t; "); + if (ISK(b)) PrintConstant(f,INDEXK(b)); else printf("-"); + printf(" "); + if (ISK(c)) PrintConstant(f,INDEXK(c)); else printf("-"); + } + break; + case OP_JMP: + case OP_FORLOOP: + case OP_FORPREP: + printf("\t; to %d",sbx+pc+2); + break; + case OP_CLOSURE: + printf("\t; %p",VOID(f->p[bx])); + break; + case OP_SETLIST: + if (c==0) printf("\t; %d",(int)code[++pc]); + else printf("\t; %d",c); + break; + default: + break; + } + printf("\n"); + } +} + +#define SS(x) (x==1)?"":"s" +#define S(x) x,SS(x) + +static void PrintHeader(const Proto* f) +{ + const char* s=getstr(f->source); + if (*s=='@' || *s=='=') + s++; + else if (*s==LUA_SIGNATURE[0]) + s="(bstring)"; + else + s="(string)"; + printf("\n%s <%s:%d,%d> (%d instruction%s, %d bytes at %p)\n", + (f->linedefined==0)?"main":"function",s, + f->linedefined,f->lastlinedefined, + S(f->sizecode),f->sizecode*Sizeof(Instruction),VOID(f)); + printf("%d%s param%s, %d slot%s, %d upvalue%s, ", + f->numparams,f->is_vararg?"+":"",SS(f->numparams), + S(f->maxstacksize),S(f->nups)); + printf("%d local%s, %d constant%s, %d function%s\n", + S(f->sizelocvars),S(f->sizek),S(f->sizep)); +} + +static void PrintConstants(const Proto* f) +{ + int i,n=f->sizek; + printf("constants (%d) for %p:\n",n,VOID(f)); + for (i=0; isizelocvars; + printf("locals (%d) for %p:\n",n,VOID(f)); + for (i=0; ilocvars[i].varname),f->locvars[i].startpc+1,f->locvars[i].endpc+1); + } +} + +static void PrintUpvalues(const Proto* f) +{ + int i,n=f->sizeupvalues; + printf("upvalues (%d) for %p:\n",n,VOID(f)); + if (f->upvalues==NULL) return; + for (i=0; iupvalues[i])); + } +} + +void PrintFunction(const Proto* f, int full) +{ + int i,n=f->sizep; + PrintHeader(f); + PrintCode(f); + if (full) + { + PrintConstants(f); + PrintLocals(f); + PrintUpvalues(f); + } + for (i=0; ip[i],full); +} diff --git a/deps/lua/test/README b/deps/lua/test/README new file mode 100644 index 0000000000000000000000000000000000000000..0c7f38bc25bdf6d1d336e60c12abed960dc961e8 --- /dev/null +++ b/deps/lua/test/README @@ -0,0 +1,26 @@ +These are simple tests for Lua. Some of them contain useful code. +They are meant to be run to make sure Lua is built correctly and also +to be read, to see how Lua programs look. 
+ +Here is a one-line summary of each program: + + bisect.lua bisection method for solving non-linear equations + cf.lua temperature conversion table (celsius to farenheit) + echo.lua echo command line arguments + env.lua environment variables as automatic global variables + factorial.lua factorial without recursion + fib.lua fibonacci function with cache + fibfor.lua fibonacci numbers with coroutines and generators + globals.lua report global variable usage + hello.lua the first program in every language + life.lua Conway's Game of Life + luac.lua bare-bones luac + printf.lua an implementation of printf + readonly.lua make global variables readonly + sieve.lua the sieve of of Eratosthenes programmed with coroutines + sort.lua two implementations of a sort function + table.lua make table, grouping all data for the same item + trace-calls.lua trace calls + trace-globals.lua trace assigments to global variables + xd.lua hex dump + diff --git a/deps/lua/test/bisect.lua b/deps/lua/test/bisect.lua new file mode 100644 index 0000000000000000000000000000000000000000..f91e69bfbaf6710cc4ec99fee38aa37631c964de --- /dev/null +++ b/deps/lua/test/bisect.lua @@ -0,0 +1,27 @@ +-- bisection method for solving non-linear equations + +delta=1e-6 -- tolerance + +function bisect(f,a,b,fa,fb) + local c=(a+b)/2 + io.write(n," c=",c," a=",a," b=",b,"\n") + if c==a or c==b or math.abs(a-b) posted to lua-l +-- modified to use ANSI terminal escape sequences +-- modified to use for instead of while + +local write=io.write + +ALIVE="" DEAD="" +ALIVE="O" DEAD="-" + +function delay() -- NOTE: SYSTEM-DEPENDENT, adjust as necessary + for i=1,10000 do end + -- local i=os.clock()+1 while(os.clock() 0 do + local xm1,x,xp1,xi=self.w-1,self.w,1,self.w + while xi > 0 do + local sum = self[ym1][xm1] + self[ym1][x] + self[ym1][xp1] + + self[y][xm1] + self[y][xp1] + + self[yp1][xm1] + self[yp1][x] + self[yp1][xp1] + next[y][x] = ((sum==2) and self[y][x]) or ((sum==3) and 1) or 0 + xm1,x,xp1,xi = x,xp1,xp1+1,xi-1 + end + ym1,y,yp1,yi = y,yp1,yp1+1,yi-1 + end +end + +-- output the array to screen +function _CELLS:draw() + local out="" -- accumulate to reduce flicker + for y=1,self.h do + for x=1,self.w do + out=out..(((self[y][x]>0) and ALIVE) or DEAD) + end + out=out.."\n" + end + write(out) +end + +-- constructor +function CELLS(w,h) + local c = ARRAY2D(w,h) + c.spawn = _CELLS.spawn + c.evolve = _CELLS.evolve + c.draw = _CELLS.draw + return c +end + +-- +-- shapes suitable for use with spawn() above +-- +HEART = { 1,0,1,1,0,1,1,1,1; w=3,h=3 } +GLIDER = { 0,0,1,1,0,1,0,1,1; w=3,h=3 } +EXPLODE = { 0,1,0,1,1,1,1,0,1,0,1,0; w=3,h=4 } +FISH = { 0,1,1,1,1,1,0,0,0,1,0,0,0,0,1,1,0,0,1,0; w=5,h=4 } +BUTTERFLY = { 1,0,0,0,1,0,1,1,1,0,1,0,0,0,1,1,0,1,0,1,1,0,0,0,1; w=5,h=5 } + +-- the main routine +function LIFE(w,h) + -- create two arrays + local thisgen = CELLS(w,h) + local nextgen = CELLS(w,h) + + -- create some life + -- about 1000 generations of fun, then a glider steady-state + thisgen:spawn(GLIDER,5,4) + thisgen:spawn(EXPLODE,25,10) + thisgen:spawn(FISH,4,12) + + -- run until break + local gen=1 + write("\027[2J") -- ANSI clear screen + while 1 do + thisgen:evolve(nextgen) + thisgen,nextgen = nextgen,thisgen + write("\027[H") -- ANSI home cursor + thisgen:draw() + write("Life - generation ",gen,"\n") + gen=gen+1 + if gen>2000 then break end + --delay() -- no delay + end +end + +LIFE(40,20) diff --git a/deps/lua/test/luac.lua b/deps/lua/test/luac.lua new file mode 100644 index 
0000000000000000000000000000000000000000..96a0a97ce7aa2704c9b8b409bcc14f1a80c746ca --- /dev/null +++ b/deps/lua/test/luac.lua @@ -0,0 +1,7 @@ +-- bare-bones luac in Lua +-- usage: lua luac.lua file.lua + +assert(arg[1]~=nil and arg[2]==nil,"usage: lua luac.lua file.lua") +f=assert(io.open("luac.out","wb")) +assert(f:write(string.dump(assert(loadfile(arg[1]))))) +assert(f:close()) diff --git a/deps/lua/test/printf.lua b/deps/lua/test/printf.lua new file mode 100644 index 0000000000000000000000000000000000000000..58c63ff5184e4f4274f9e0408a2959526c365ac0 --- /dev/null +++ b/deps/lua/test/printf.lua @@ -0,0 +1,7 @@ +-- an implementation of printf + +function printf(...) + io.write(string.format(...)) +end + +printf("Hello %s from %s on %s\n",os.getenv"USER" or "there",_VERSION,os.date()) diff --git a/deps/lua/test/readonly.lua b/deps/lua/test/readonly.lua new file mode 100644 index 0000000000000000000000000000000000000000..85c0b4e01324d0cf5f87495d44a6da26ad4cce96 --- /dev/null +++ b/deps/lua/test/readonly.lua @@ -0,0 +1,12 @@ +-- make global variables readonly + +local f=function (t,i) error("cannot redefine global variable `"..i.."'",2) end +local g={} +local G=getfenv() +setmetatable(g,{__index=G,__newindex=f}) +setfenv(1,g) + +-- an example +rawset(g,"x",3) +x=2 +y=1 -- cannot redefine `y' diff --git a/deps/lua/test/sieve.lua b/deps/lua/test/sieve.lua new file mode 100644 index 0000000000000000000000000000000000000000..0871bb212592726d5cca2c9478e9fcaf12c8ff09 --- /dev/null +++ b/deps/lua/test/sieve.lua @@ -0,0 +1,29 @@ +-- the sieve of of Eratosthenes programmed with coroutines +-- typical usage: lua -e N=1000 sieve.lua | column + +-- generate all the numbers from 2 to n +function gen (n) + return coroutine.wrap(function () + for i=2,n do coroutine.yield(i) end + end) +end + +-- filter the numbers generated by `g', removing multiples of `p' +function filter (p, g) + return coroutine.wrap(function () + while 1 do + local n = g() + if n == nil then return end + if math.mod(n, p) ~= 0 then coroutine.yield(n) end + end + end) +end + +N=N or 1000 -- from command line +x = gen(N) -- generate primes up to N +while 1 do + local n = x() -- pick a number until done + if n == nil then break end + print(n) -- must be a prime number + x = filter(n, x) -- now remove its multiples +end diff --git a/deps/lua/test/sort.lua b/deps/lua/test/sort.lua new file mode 100644 index 0000000000000000000000000000000000000000..0bcb15f837a7acd123b5426b3ecd90badbe5a6e1 --- /dev/null +++ b/deps/lua/test/sort.lua @@ -0,0 +1,66 @@ +-- two implementations of a sort function +-- this is an example only. 
Lua has now a built-in function "sort" + +-- extracted from Programming Pearls, page 110 +function qsort(x,l,u,f) + if ly end) + show("after reverse selection sort",x) + qsort(x,1,n,function (x,y) return x>> ",string.rep(" ",level)) + if t~=nil and t.currentline>=0 then io.write(t.short_src,":",t.currentline," ") end + t=debug.getinfo(2) + if event=="call" then + level=level+1 + else + level=level-1 if level<0 then level=0 end + end + if t.what=="main" then + if event=="call" then + io.write("begin ",t.short_src) + else + io.write("end ",t.short_src) + end + elseif t.what=="Lua" then +-- table.foreach(t,print) + io.write(event," ",t.name or "(Lua)"," <",t.linedefined,":",t.short_src,">") + else + io.write(event," ",t.name or "(C)"," [",t.what,"] ") + end + io.write("\n") +end + +debug.sethook(hook,"cr") +level=0 diff --git a/deps/lua/test/trace-globals.lua b/deps/lua/test/trace-globals.lua new file mode 100644 index 0000000000000000000000000000000000000000..295e670caa2bc0aa95ea822b2a68c4305f6d31f0 --- /dev/null +++ b/deps/lua/test/trace-globals.lua @@ -0,0 +1,38 @@ +-- trace assigments to global variables + +do + -- a tostring that quotes strings. note the use of the original tostring. + local _tostring=tostring + local tostring=function(a) + if type(a)=="string" then + return string.format("%q",a) + else + return _tostring(a) + end + end + + local log=function (name,old,new) + local t=debug.getinfo(3,"Sl") + local line=t.currentline + io.write(t.short_src) + if line>=0 then io.write(":",line) end + io.write(": ",name," is now ",tostring(new)," (was ",tostring(old),")","\n") + end + + local g={} + local set=function (t,name,value) + log(name,g[name],value) + g[name]=value + end + setmetatable(getfenv(),{__index=g,__newindex=set}) +end + +-- an example + +a=1 +b=2 +a=10 +b=20 +b=nil +b=200 +print(a,b,c) diff --git a/deps/lua/test/xd.lua b/deps/lua/test/xd.lua new file mode 100644 index 0000000000000000000000000000000000000000..ebc3effc06bfde46331640f4ddafcf47fea682fe --- /dev/null +++ b/deps/lua/test/xd.lua @@ -0,0 +1,14 @@ +-- hex dump +-- usage: lua xd.lua < file + +local offset=0 +while true do + local s=io.read(16) + if s==nil then return end + io.write(string.format("%08X ",offset)) + string.gsub(s,"(.)", + function (c) io.write(string.format("%02X ",string.byte(c))) end) + io.write(string.rep(" ",3*(16-string.len(s)))) + io.write(" ",string.gsub(s,"%c","."),"\n") + offset=offset+16 +end diff --git a/deps/pthread/CMakeLists.txt b/deps/pthread/CMakeLists.txt index 04e5be7472a9b8cbdb384348697b919bf2dd0ece..16d03f3590bf933c383dd1294b1117fd9f95ad7a 100644 --- a/deps/pthread/CMakeLists.txt +++ b/deps/pthread/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/regex/CMakeLists.txt b/deps/regex/CMakeLists.txt index 054b093d07c386d7ff9b0ffc4c05909d79b33129..05d01f02efa4c731bb67f6f5f654b499f6f2be03 100644 --- a/deps/regex/CMakeLists.txt +++ b/deps/regex/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/deps/wepoll/CMakeLists.txt b/deps/wepoll/CMakeLists.txt index a81fd782bbc4b05a1158273a7fcc6701bc4d980d..e9b7749d82e381e7002f7bca65dc6d5a4e1a7740 100644 --- a/deps/wepoll/CMakeLists.txt +++ b/deps/wepoll/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_WINDOWS) diff --git 
a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt index f83aa70085491fb6575c0a6bf93252192cddd040..1220cc4246b4cef9b0709e2f14dec46ba787c4cc 100644 --- a/deps/zlib-1.2.11/CMakeLists.txt +++ b/deps/zlib-1.2.11/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_WINDOWS) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index 4c37ce598cd589b800efd532447797cfce568cd7..18bdc15d30430516c3ae6c847fc448477003dd66 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -42,7 +42,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [数据写入](/taos-sql#insert):支持单表单条、多条、多表多条写入,支持历史数据写入 * [数据查询](/taos-sql#select):支持时间段、值过滤、排序、查询结果手动分页等 * [SQL函数](/taos-sql#functions):支持各种聚合函数、选择函数、计算函数,如avg, min, diff等 -* [时间维度聚合](/taos-sql#aggregation):将表中数据按照时间段进行切割后聚合,降维处理 +* [窗口切分聚合](/taos-sql#aggregation):将表中数据按照时间段等方式进行切割后聚合,降维处理 * [边界限制](/taos-sql#limitation):库、表、SQL等边界限制条件 * [错误码](/taos-sql/error-code):TDengine 2.0 错误码以及对应的十进制码 @@ -63,7 +63,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 ## [高级功能](/advanced-features) * [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算 -* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):象典型的消息队列,应用可订阅接收到的最新数据 +* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):类似典型的消息队列,应用可订阅接收到的最新数据 * [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取 * [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送 @@ -106,6 +106,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [数据导入](/administrator#import):可按脚本文件导入,也可按数据文件导入 * [数据导出](/administrator#export):从shell按表导出,也可用taosdump工具做各种导出 * [系统监控](/administrator#status):检查系统现有的连接、查询、流式计算,日志和事件等 +* [性能优化](/administrator#optimize):对长期运行的系统进行维护优化,保障性能表现 * [文件目录结构](/administrator#directories):TDengine数据文件、配置文件等所在目录 * [参数限制与保留关键字](/administrator#keywords):TDengine的参数限制与保留关键字列表 diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index 6eb58a1433ed0d43b313a9dc979ae5873ba00e8f..ab10b28fd3950bfa10e47113696de0829b2da74d 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -2,25 +2,25 @@ ## 快捷安装 -TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 +TDengine 软件分为服务器、客户端和报警模块三部分,目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后续会支持 Windows、Mac OS 等系统。客户端可以在 Windows 或 Linux 上安装和运行。任何 OS 的应用也可以选择 RESTful 接口连接服务器 taosd。CPU 支持 X64/ARM64/MIPS64/Alpha64,后续会支持 ARM32、RISC-V 等 CPU 架构。用户可根据需求选择通过 [源码](https://www.taosdata.com/cn/getting-started/#通过源码安装) 或者 [安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装) 来安装。 ### 通过源码安装 -请参考我们的[TDengine github主页](https://github.com/taosdata/TDengine)下载源码并安装. +请参考我们的 [TDengine github 主页](https://github.com/taosdata/TDengine) 下载源码并安装. 
-### 通过Docker容器运行 +### 通过 Docker 容器运行 -暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OSX 和 Windows 环境下尝试 TDengine。 +暂时不建议生产环境采用 Docker 来部署 TDengine 的客户端或服务端,但在开发环境下或初次尝试时,使用 Docker 方式部署是十分方便的。特别是,利用 Docker,可以方便地在 Mac OS X 和 Windows 环境下尝试 TDengine。 -详细操作方法请参照 [通过Docker快速体验TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。 +详细操作方法请参照 [通过 Docker 快速体验 TDengine](https://www.taosdata.com/cn/documentation/getting-started/docker)。 ### 通过安装包安装 -TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择: +TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择: -安装包下载在[这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。 +安装包下载在 [这里](https://www.taosdata.com/cn/getting-started/#通过安装包安装)。 -具体的安装过程,请参见[TDengine多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html)以及[视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。 +具体的安装过程,请参见 [TDengine 多种安装包的安装和卸载](https://www.taosdata.com/blog/2019/08/09/566.html) 以及 [视频教程](https://www.taosdata.com/blog/2020/11/11/1941.html)。 ## 轻松启动 @@ -53,21 +53,21 @@ $ systemctl status taosd 如果系统中不支持 systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。 -## TDengine命令行程序 +## TDengine 命令行程序 -执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。 +执行 TDengine 命令行程序,您只要在 Linux 终端执行 `taos` 即可。 ```bash $ taos ``` -如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/documentation/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: +如果 TDengine 终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](https://www.taosdata.com/cn/documentation/faq/) 来解决终端连接服务端失败的问题)。TDengine 终端的提示符号如下: ```cmd taos> ``` -在TDengine终端中,用户可以通过SQL命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的SQL语句需要以分号结束来运行。示例: +在 TDengine 终端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例: ```mysql create database demo; @@ -76,24 +76,24 @@ create table t (ts timestamp, speed int); insert into t values ('2019-07-15 00:00:00', 10); insert into t values ('2019-07-15 01:00:00', 20); select * from t; - ts | speed | -=================================== - 19-07-15 00:00:00.000| 10| - 19-07-15 01:00:00.000| 20| -Query OK, 2 row(s) in set (0.001700s) + ts | speed | +======================================== + 2019-07-15 00:00:00.000 | 10 | + 2019-07-15 01:00:00.000 | 20 | +Query OK, 2 row(s) in set (0.003128s) ``` -除执行SQL语句外,系统管理员还可以从TDengine终端检查系统运行状态,添加删除用户账号等。 +除执行 SQL 语句外,系统管理员还可以从 TDengine 终端检查系统运行状态,添加删除用户账号等。 ### 命令行参数 -您可通过配置命令行参数来改变TDengine终端的行为。以下为常用的几个命令行参数: +您可通过配置命令行参数来改变 TDengine 终端的行为。以下为常用的几个命令行参数: -- -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_ -- -h, --host: 指定服务的IP地址,默认为本地服务 -- -s, --commands: 在不进入终端的情况下运行TDengine命令 -- -u, -- user: 连接TDengine服务器的用户名,缺省为root -- -p, --password: 连接TDengine服务器的密码,缺省为taosdata +- -c, --config-dir: 指定配置文件目录,默认为 _/etc/taos_ +- -h, --host: 指定服务的 FQDN 地址(也可以使用 IP),默认为连接本地服务 +- -s, --commands: 在不进入终端的情况下运行 TDengine 命令 +- -u, --user: 连接 TDengine 服务器的用户名,缺省为 root +- -p, --password: 连接TDengine服务器的密码,缺省为 taosdata - -?, --help: 打印出所有命令行参数 示例: @@ -102,7 +102,7 @@ Query OK, 2 row(s) in set (0.001700s) $ taos -h 192.168.0.1 -s "use db; show tables;" ``` -### 运行SQL命令脚本 +### 运行 SQL 命令脚本 TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本. @@ -110,27 +110,27 @@ TDengine 终端可以通过 `source` 命令来运行 SQL 命令脚本. 
taos> source ; ``` -### Shell小技巧 +### Shell 小技巧 - 可以使用上下光标键查看历史输入的指令 -- 修改用户密码。在 shell 中使用 alter user 指令 +- 修改用户密码,在 shell 中使用 alter user 指令 - ctrl+c 中止正在进行中的查询 - 执行 `RESET QUERY CACHE` 清空本地缓存的表 schema ## TDengine 极速体验 -启动TDengine的服务,在Linux终端执行taosdemo +启动 TDengine 的服务,在 Linux 终端执行 taosdemo ```bash $ taosdemo ``` -该命令将在数据库test下面自动创建一张超级表meters,该超级表下有1万张表,表名为"t0" 到"t9999",每张表有10万条记录,每条记录有 (f1, f2, f3)三个字段,时间戳从"2017-07-14 10:40:00 000" 到"2017-07-14 10:41:39 999",每张表带有标签areaid和loc, areaid被设置为1到10, loc被设置为"beijing"或者“shanghai"。 +该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupdId,groupdId 被设置为 1 到 10, location 被设置为 "beijing" 或者 "shanghai"。 -执行这条命令大概需要10分钟,最后共插入10亿条记录。 +执行这条命令大概需要几分钟,最后共插入 1 亿条记录。 -在TDengine客户端输入查询命令,体验查询速度。 +在 TDengine 客户端输入查询命令,体验查询速度。 - 查询超级表下记录总条数: @@ -138,72 +138,64 @@ $ taosdemo taos> select count(*) from test.meters; ``` -- 查询10亿条记录的平均值、最大值、最小值等: +- 查询 1 亿条记录的平均值、最大值、最小值等: ```mysql -taos> select avg(f1), max(f2), min(f3) from test.meters; +taos> select avg(current), max(voltage), min(phase) from test.meters; ``` -- 查询loc="beijing"的记录总条数: +- 查询 location="beijing" 的记录总条数: ```mysql -taos> select count(*) from test.meters where loc="beijing"; +taos> select count(*) from test.meters where location="beijing"; ``` -- 查询areaid=10的所有记录的平均值、最大值、最小值等: +- 查询 groupdId=10 的所有记录的平均值、最大值、最小值等: ```mysql -taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10; +taos> select avg(current), max(voltage), min(phase) from test.meters where groupdId=10; ``` -- 对表t10按10s进行平均值、最大值和最小值聚合统计: +- 对表 d10 按 10s 进行平均值、最大值和最小值聚合统计: ```mysql -taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); +taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); ``` -**Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。 +**Note:** taosdemo 命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help` 详细列出。您可以设置不同参数进行体验。 ## 客户端和报警模块 -如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下: +如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux 和 Windows 安装包可以在 [这里](https://www.taosdata.com/cn/getting-started/#客户端) 下载。 -- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M) -- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M) -- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M) - -报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)): - -- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M) +报警模块的 Linux 和 Windows 安装包请在 [所有下载链接](https://www.taosdata.com/cn/all-downloads/) 页面搜索“TDengine Alert Linux”章节或“TDengine Alert Windows”章节进行下载。使用方法请参考 [报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)。 ## 支持平台列表 -### TDengine服务器支持的平台列表 +### TDengine 服务器支持的平台列表 | | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | | -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | | X64 | ● | ● | | ○ | ● | ● | ● | -| 树莓派 ARM32 | | ● | ● | | | | | | 龙芯 MIPS64 | | | ● | | | | | -| 鲲鹏 ARM64 | | ○ | ○ | | ● | | | -| 申威 Alpha64 | | | ○ | ● | | | | +| 鲲鹏 ARM64 | | ○ | ○ | | ● | | | +| 申威 Alpha64 | | | ○ | ● | | | | | 飞腾 ARM64 | | ○ 优麒麟 | | | | | | | 海光 X64 | ● | ● | ● | ○ | ● | ● | | -| 瑞芯微 ARM64/32 | | | ○ | | | | | -| 全志 ARM64/32 | | | ○ | | | | | -| 炬力 ARM64/32 | | | ○ | | | | | -| TI ARM32 | | | ○ | 
| | | | -| 华为云 ARM64 | | | | | | | ● | +| 瑞芯微 ARM64 | | | ○ | | | | | +| 全志 ARM64 | | | ○ | | | | | +| 炬力 ARM64 | | | ○ | | | | | +| 华为云 ARM64 | | | | | | | ● | 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 -### TDengine客户端和连接器支持的平台列表 +### TDengine 客户端和连接器支持的平台列表 -目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。 +目前 TDengine 的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha 等硬件平台,以及 Linux/Win64/Win32 等开发环境。 对照矩阵如下: @@ -220,5 +212,5 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 -请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。 +请跳转到 [连接器](https://www.taosdata.com/cn/documentation/connector) 查看更详细的信息。 diff --git a/documentation20/cn/03.architecture/02.replica/docs.md b/documentation20/cn/03.architecture/02.replica/docs.md index 59192ee0cc1fdeb130e2f541b424af284fbc916a..183306eed80884a06bf0b85f000c12a0ccaeaaf3 100644 --- a/documentation20/cn/03.architecture/02.replica/docs.md +++ b/documentation20/cn/03.architecture/02.replica/docs.md @@ -140,7 +140,7 @@ TDengine采取的是Master-Slave模式进行同步,与流行的RAFT一致性 整个数据恢复流程分为两大步骤,第一步,先恢复archived data(file), 然后恢复wal。具体流程如下: -![replica-forward.png](page://images/architecture/replica-forward.png) +![replica-restore.png](page://images/architecture/replica-restore.png) 1. 通过已经建立的TCP连接,发送sync req给master节点 2. master收到sync req后,以client的身份,向vnode B主动建立一新的专用于同步的TCP连接(syncFd) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index e6d214c74d3367d814e0022d4c6a147c4a44b326..511bab8a605ce666d263d609d1599e30c85d78c4 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -49,6 +49,7 @@ TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致 +注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 ## 如何获取 taos-jdbcdriver @@ -531,8 +532,9 @@ Query OK, 1 row(s) in set (0.000141s) | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | -------------------- | ----------------- | -------- | -| 2.0.22 | 2.0.18.0 及以上 | 1.8.x | -| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.0 | 1.8.x | +| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | +| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | | 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | @@ -551,7 +553,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 | BIGINT | java.lang.Long | | FLOAT | java.lang.Float | | DOUBLE | java.lang.Double | -| SMALLINT | java.lang.Short | +| SMALLINT | java.lang.Short | | TINYINT | java.lang.Byte | | BOOL | java.lang.Boolean | | BINARY | byte array | diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index c74d1ebc3e651ab4e8747d8d1117233392d0b4b4..3a6e884f56addc7d2d4ccacad57ef3baa6844a4b 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -259,7 +259,7 @@ typedef struct taosField { 获取最近一次API调用失败的原因,返回值为字符串。 -- `char *taos_errno(TAOS_RES *res)` +- `int taos_errno(TAOS_RES *res)` 获取最近一次API调用失败的原因,返回值为错误代码。 @@ -427,12 +427,15 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 * res:查询结果集,注意结果集中可能没有记录 * param:调用 `taos_subscribe`时客户程序提供的附加参数 * code:错误码 + **注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。 * `TAOS_RES *taos_consume(TAOS_SUB *tsub)` 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 
`NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。 + **注意**:在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。 + * `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` 取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 @@ -554,6 +557,13 @@ c1.close() conn.close() ``` +#### 关于纳秒 (nanosecond) 在 Python 连接器中的说明 + +由于目前 Python 对 nanosecond 支持的不完善(参见链接 1. 2. ),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,涛思数据可能会修改相关接口。 + +1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds +2. https://www.python.org/dev/peps/pep-0564/ + #### 帮助信息 用户可通过python的帮助信息直接查看模块的使用信息,或者参考tests/examples/python中的示例程序。以下为部分常用类和方法: @@ -585,7 +595,9 @@ conn.close() ## RESTful Connector -为支持各种不同类型平台的开发,TDengine提供符合REST设计标准的API,即RESTful API。为最大程度降低学习成本,不同于其他数据库RESTful API的设计方法,TDengine直接通过HTTP POST 请求BODY中包含的SQL语句来操作数据库,仅需要一个URL。RESTful连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 +为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 RESTful API。为最大程度降低学习成本,不同于其他数据库 RESTful API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。RESTful 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 + +注意:与标准连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。 ### HTTP请求格式 @@ -803,7 +815,7 @@ C#连接器支持的系统有:Linux 64/Windows x64/Windows x86 * 应用驱动安装请参考[安装连接器驱动步骤](https://www.taosdata.com/cn/documentation/connector#driver)。 * .NET接口文件TDengineDrivercs.cs和参考程序示例TDengineTest.cs均位于Windows客户端install_directory/examples/C#目录下。 -* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(dapper)框架驱动。 +* 在Windows系统上,C#应用程序可以使用TDengine的原生C接口来执行所有数据库操作,后续版本将提供ORM(Dapper)框架驱动。 ### 安装验证 @@ -895,6 +907,10 @@ go env -w GOPROXY=https://goproxy.io,direct sql.Open内置的方法,Close closes the statement. 
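As a minimal sketch of how the `sql.Open` / `Close` calls mentioned above are typically wired up against TDengine from Go: the driver import path `github.com/taosdata/driver-go/taosSql`, the registered driver name `taosSql`, and the DSN layout `user:password@/tcp(host:port)/dbname` are assumptions not confirmed by this patch, so check the driver's own README for the exact values matching your release.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	// Assumed import path for the TDengine Go driver; adjust to the
	// version documented for your TDengine release.
	_ "github.com/taosdata/driver-go/taosSql"
)

func main() {
	// Assumed DSN format: user:password@/tcp(host:port)/database
	dsn := "root:taosdata@/tcp(127.0.0.1:6030)/test"
	db, err := sql.Open("taosSql", dsn)
	if err != nil {
		log.Fatalf("open failed: %v", err)
	}
	defer db.Close()

	// Create a table and insert one row through the standard database/sql API.
	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS t (ts TIMESTAMP, speed INT)"); err != nil {
		log.Fatalf("create table failed: %v", err)
	}
	if _, err := db.Exec("INSERT INTO t VALUES (NOW, 10)"); err != nil {
		log.Fatalf("insert failed: %v", err)
	}

	// Query the rows back and scan them into Go values.
	rows, err := db.Query("SELECT ts, speed FROM t")
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}
	defer rows.Close()

	for rows.Next() {
		var ts time.Time // assumes the driver maps TIMESTAMP to time.Time
		var speed int
		if err := rows.Scan(&ts, &speed); err != nil {
			log.Fatalf("scan failed: %v", err)
		}
		fmt.Println(ts, speed)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```

The sketch keeps error handling to `log.Fatalf` for brevity; a real application would propagate errors, and if the driver does not map TIMESTAMP columns to `time.Time`, scan them into a string or `[]byte` instead.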
+### 其他代码示例 + +[Consume Messages from Kafka](https://github.com/taosdata/go-demo-kafka) 是一个通过 Go 语言实现消费 Kafka 队列写入 TDengine 的示例程序,也可以作为通过 Go 连接 TDengine 的写法参考。 + ## Node.js Connector Node.js连接器支持的系统有: diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md index d1dba5a736cb3c9dac596e553b3f49c93ca5ce29..b47f297ae0a68c91e5d38aad000acdb14591283d 100644 --- a/documentation20/cn/09.connections/docs.md +++ b/documentation20/cn/09.connections/docs.md @@ -77,48 +77,43 @@ sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tde ## MATLAB -MATLAB可以通过安装包内提供的JDBC Driver直接连接到TDengine获取数据到本地工作空间。 +MATLAB 可以通过安装包内提供的 JDBC Driver 直接连接到 TDengine 获取数据到本地工作空间。 -### MATLAB的JDBC接口适配 +### MATLAB 的 JDBC 接口适配 -MATLAB的适配有下面几个步骤,下面以Windows10上适配MATLAB2017a为例: +MATLAB 的适配有下面几个步骤,下面以 Windows 10 上适配 MATLAB2021a 为例: -- 将TDengine安装包内的驱动程序JDBCDriver-1.0.0-dist.jar拷贝到${matlab_root}\MATLAB\R2017a\java\jar\toolbox -- 将TDengine安装包内的taos.lib文件拷贝至${matlab_ root _dir}\MATLAB\R2017a\lib\win64 -- 将新添加的驱动jar包加入MATLAB的classpath。在${matlab_ root _dir}\MATLAB\R2017a\toolbox\local\classpath.txt文件中添加下面一行 -​ +- 将 TDengine 客户端安装路径下的 `\TDengine\connector\jdbc的驱动程序taos-jdbcdriver-2.0.25-dist.jar` 拷贝到 `${matlab_root}\MATLAB\R2021a\java\jar\toolbox`。 +- 将 TDengine 安装包内的 `taos.lib` 文件拷贝至 `${matlab_root_dir}\MATLAB\R2021\lib\win64`。 +- 将新添加的驱动 jar 包加入 MATLAB 的 classpath。在 `${matlab_root_dir}\MATLAB\R2021a\toolbox\local\classpath.txt` 文件中添加下面一行: ``` -$matlabroot/java/jar/toolbox/JDBCDriver-1.0.0-dist.jar +$matlabroot/java/jar/toolbox/taos-jdbcdriver-2.0.25-dist.jar ``` -- 在${user_home}\AppData\Roaming\MathWorks\MATLAB\R2017a\下添加一个文件javalibrarypath.txt, 并在该文件中添加taos.dll的路径,比如您的taos.dll是在安装时拷贝到了C:\Windows\System32下,那么就应该在javalibrarypath.txt中添加如下一行: -​ +- 在 `${user_home}\AppData\Roaming\MathWorks\MATLAB\R2021a\` 下添加一个文件 `javalibrarypath.txt`,并在该文件中添加 taos.dll 的路径,比如您的 taos.dll 是在安装时拷贝到了 `C:\Windows\System32` 下,那么就应该在 `javalibrarypath.txt` 中添加如下一行: ``` C:\Windows\System32 ``` -### 在MATLAB中连接TDengine获取数据 +### 在 MATLAB 中连接 TDengine 获取数据 -在成功进行了上述配置后,打开MATLAB。 +在成功进行了上述配置后,打开 MATLAB。 - 创建一个连接: - ```matlab -conn = database(‘db’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://127.0.0.1:0/’) +conn = database(‘test’, ‘root’, ‘taosdata’, ‘com.taosdata.jdbc.TSDBDriver’, ‘jdbc:TSDB://192.168.1.94:6030/’) ``` - 执行一次查询: - ```matlab sql0 = [‘select * from tb’] data = select(conn, sql0); ``` - 插入一条记录: - ```matlab sql1 = [‘insert into tb values (now, 1)’] exec(conn, sql1) ``` -更多例子细节请参考安装包内examples\Matlab\TDengineDemo.m文件。 +更多例子细节请参考安装包内 `examples\Matlab\TDengineDemo.m` 文件。 ## R diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index 62d709c279e96d05996dee977450ed81a27e517a..db20ca4edb6513f70ebbf17969be1c20dccb6163 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -85,7 +85,7 @@ taos> 将后续的数据节点添加到现有集群,具体有以下几步: -1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd; +1. 按照[《立即开始》](https://www.taosdata.com/cn/documentation/getting-started/)一章的方法在每个物理节点启动taosd;(注意:每个物理节点都需要在 taos.cfg 文件中将 firstEP 参数配置为新集群首个节点的 End Point——在本例中是 h1.taos.com:6030) 2. 
在第一个数据节点,使用CLI程序taos, 登录进TDengine系统, 执行命令: diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index ae13a36f76ff92994c474452d91655adb37a6722..19e4b761bad466ef80f2eb2ab128e99a4903b2d7 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -123,15 +123,15 @@ taosd -C - minRows:文件块中记录的最小条数。单位为条,默认值:100。 - maxRows:文件块中记录的最大条数。单位为条,默认值:4096。 - comp:文件压缩标志位。0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改) -- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel)(可通过 alter database 修改) -- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。(可通过 alter database 修改) +- wal:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。(在 taos.cfg 中参数名需要写作 walLevel) +- fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。 - cache:内存块的大小。单位为兆字节(MB),默认值:16。 - blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改) - replica:副本个数。取值范围:1-3,单位为个,默认值:1。(可通过 alter database 修改) - quorum:多副本环境下指令执行的确认数要求。取值范围:1、2,单位为个,默认值:1。(可通过 alter database 修改) - precision:时间戳精度标识。ms表示毫秒,us表示微秒,默认值:ms。(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) - cacheLast:是否在内存中缓存子表的最近数据。0:关闭;1:缓存子表最近一行数据;2:缓存子表每一列的最近的非NULL值;3:同时打开缓存最近行和列功能。默认值:0。(可通过 alter database 修改)(从 2.1.2.0 版本开始此参数支持 0~3 的取值范围,在此之前取值只能是 [0, 1];而 2.0.11.0 之前的版本在 SQL 指令中不支持此参数。)(2.1.2.0 版本之前、2.0.20.7 版本之前在 taos.cfg 文件中不支持此参数。) -- update:是否允许更新。0:不允许;1:允许。默认值:0。(可通过 alter database 修改) +- update:是否允许更新。0:不允许;1:允许。默认值:0。 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: @@ -418,6 +418,19 @@ TDengine启动后,会自动创建一个监测数据库log,并自动将服务 这些监测信息的采集缺省是打开的,但可以修改配置文件里的选项enableMonitor将其关闭或打开。 + +## 性能优化 + +因数据行 [update](https://www.taosdata.com/cn/documentation/faq#update)、表删除、数据过期等原因,TDengine 的磁盘存储文件有可能出现数据碎片,影响查询操作的性能表现。从 2.1.3.0 版本开始,新增 SQL 指令 COMPACT 来启动碎片重整过程: + +```mysql +COMPACT VNODES IN (vg_id1, vg_id2, ...) 
+``` + +COMPACT 命令对指定的一个或多个 VGroup 启动碎片重整,系统会通过任务队列尽快安排重整操作的具体执行。COMPACT 指令所需的 VGroup id,可以通过 `SHOW VGROUPS;` 指令的输出结果获取;而且在 `SHOW VGROUPS;` 中会有一个 compacting 列,值为 1 时表示对应的 VGroup 正在进行碎片重整,为 0 时则表示并没有处于重整状态。 + +需要注意的是,碎片重整操作会大幅消耗磁盘 I/O。因此在重整进行期间,有可能会影响节点的写入和查询性能,甚至在极端情况下导致短时间的阻写。 + ## 文件目录结构 安装TDengine后,默认会在操作系统中生成下列目录或文件: @@ -465,43 +478,44 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下: -| 关键字列表 | | | | | -| ---------- | ----------- | ------------ | ---------- | --------- | -| ABLOCKS | CONNECTIONS | HAVING | MODULES | SMALLINT | -| ABORT | COPY | ID | NCHAR | SPREAD | -| ACCOUNT | COUNT | IF | NE | STABLE | -| ACCOUNTS | CREATE | IGNORE | NONE | STABLES | -| ADD | CTIME | IMMEDIATE | NOT | STAR | -| AFTER | DATABASE | IMPORT | NOTNULL | STATEMENT | -| ALL | DATABASES | IN | NOW | STDDEV | -| ALTER | DAYS | INITIALLY | OF | STREAM | -| AND | DEFERRED | INSERT | OFFSET | STREAMS | -| AS | DELIMITERS | INSTEAD | OR | STRING | -| ASC | DESC | INTEGER | ORDER | SUM | -| ATTACH | DESCRIBE | INTERVAL | PASS | TABLE | -| AVG | DETACH | INTO | PERCENTILE | TABLES | -| BEFORE | DIFF | IP | PLUS | TAG | -| BEGIN | DISTINCT | IS | PRAGMA | TAGS | -| BETWEEN | DIVIDE | ISNULL | PREV | TBLOCKS | -| BIGINT | DNODE | JOIN | PRIVILEGE | TBNAME | -| BINARY | DNODES | KEEP | QUERIES | TIMES | -| BITAND | DOT | KEY | QUERY | TIMESTAMP | -| BITNOT | DOUBLE | KILL | RAISE | TINYINT | -| BITOR | DROP | LAST | REM | TOP | -| BOOL | EACH | LE | REPLACE | TOPIC | -| BOTTOM | END | LEASTSQUARES | REPLICA | TRIGGER | -| BY | EQ | LIKE | RESET | UMINUS | -| CACHE | EXISTS | LIMIT | RESTRICT | UNION | -| CASCADE | EXPLAIN | LINEAR | ROW | UPLUS | -| CHANGE | FAIL | LOCAL | ROWS | USE | -| CLOG | FILL | LP | RP | USER | -| CLUSTER | FIRST | LSHIFT | RSHIFT | USERS | -| COLON | FLOAT | LT | SCORES | USING | -| COLUMN | FOR | MATCH | SELECT | VALUES | -| COMMA | FROM | MAX | SEMI | VARIABLE | -| COMP | GE | METRIC | SET | VGROUPS | -| CONCAT | GLOB | METRICS | SHOW | VIEW | -| CONFIGS | GRANTS | MIN | SLASH | WAVG | -| CONFLICT | GROUP | MINUS | SLIDING | WHERE | -| CONNECTION | GT | MNODES | SLIMIT | | +| 关键字列表 | | | | | +| ------------ | ------------ | ------------ | ------------ | ------------ | +| ABORT | CREATE | IGNORE | NULL | STAR | +| ACCOUNT | CTIME | IMMEDIATE | OF | STATE | +| ACCOUNTS | DATABASE | IMPORT | OFFSET | STATEMENT | +| ADD | DATABASES | IN | OR | STATE_WINDOW | +| AFTER | DAYS | INITIALLY | ORDER | STORAGE | +| ALL | DBS | INSERT | PARTITIONS | STREAM | +| ALTER | DEFERRED | INSTEAD | PASS | STREAMS | +| AND | DELIMITERS | INT | PLUS | STRING | +| AS | DESC | INTEGER | PPS | SYNCDB | +| ASC | DESCRIBE | INTERVAL | PRECISION | TABLE | +| ATTACH | DETACH | INTO | PREV | TABLES | +| BEFORE | DISTINCT | IS | PRIVILEGE | TAG | +| BEGIN | DIVIDE | ISNULL | QTIME | TAGS | +| BETWEEN | DNODE | JOIN | QUERIES | TBNAME | +| BIGINT | DNODES | KEEP | QUERY | TIMES | +| BINARY | DOT | KEY | QUORUM | TIMESTAMP | +| BITAND | DOUBLE | KILL | RAISE | TINYINT | +| BITNOT | DROP | LE | REM | TOPIC | +| BITOR | EACH | LIKE | REPLACE | TOPICS | +| BLOCKS | END | LIMIT | REPLICA | TRIGGER | +| BOOL | EQ | LINEAR | RESET | TSERIES | +| BY | EXISTS | LOCAL | RESTRICT | UMINUS | +| CACHE | EXPLAIN | LP | ROW | UNION | +| CACHELAST | FAIL | LSHIFT | RP | UNSIGNED | +| CASCADE | FILE | LT | RSHIFT | UPDATE | +| CHANGE | FILL | MATCH | SCORES | UPLUS | +| CLUSTER | FLOAT | MAXROWS | SELECT | USE | +| COLON | FOR | MINROWS | SEMI | USER | +| 
COLUMN | FROM | MINUS | SESSION | USERS | +| COMMA | FSYNC | MNODES | SET | USING | +| COMP | GE | MODIFY | SHOW | VALUES | +| COMPACT | GLOB | MODULES | SLASH | VARIABLE | +| CONCAT | GRANTS | NCHAR | SLIDING | VARIABLES | +| CONFLICT | GROUP | NE | SLIMIT | VGROUPS | +| CONNECTION | GT | NONE | SMALLINT | VIEW | +| CONNECTIONS | HAVING | NOT | SOFFSET | VNODES | +| CONNS | ID | NOTNULL | STABLE | WAL | +| COPY | IF | NOW | STABLES | WHERE | diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index abbe7d8a0a9345c4145c0b65f43e56d5d6039d53..72f4876dcfcab3f99bc34acb264db5aff11c79ac 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -89,13 +89,13 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ```mysql USE db_name; ``` - 使用/切换数据库 + 使用/切换数据库(在 RESTful 连接方式下无效)。 - **删除数据库** ```mysql DROP DATABASE [IF EXISTS] db_name; ``` - 删除数据库。所包含的全部数据表将被删除,谨慎使用 + 删除数据库。指定 Database 所包含的全部数据表将被删除,谨慎使用! - **修改数据库参数** ```mysql @@ -129,22 +129,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 CACHELAST 参数控制是否在内存中缓存子表的最近数据。缺省值为 0,取值范围 [0, 1, 2, 3]。其中 0 表示不缓存,1 表示缓存子表最近一行数据,2 表示缓存子表每一列的最近的非 NULL 值,3 表示同时打开缓存最近行和列功能。(从 2.0.11.0 版本开始支持参数值 [0, 1],从 2.1.2.0 版本开始支持参数值 [0, 1, 2, 3]。) 说明:缓存最近行,将显著改善 LAST_ROW 函数的性能表现;缓存每列的最近非 NULL 值,将显著改善无特殊影响(WHERE、ORDER BY、GROUP BY、INTERVAL)下的 LAST 函数的性能表现。 - ```mysql - ALTER DATABASE db_name WAL 1; - ``` - WAL 参数控制 WAL 日志的落盘方式。缺省值为 1,取值范围为 [1, 2]。1 表示写 WAL,但不执行 fsync;2 表示写 WAL,而且执行 fsync。 - - ```mysql - ALTER DATABASE db_name FSYNC 3000; - ``` - FSYNC 参数控制执行 fsync 操作的周期。缺省值为 3000,单位是毫秒,取值范围为 [0, 180000]。如果设置为 0,表示每次写入,立即执行 fsync。该设置项主要用于调节 WAL 参数设为 2 时的系统行为。 - - ```mysql - ALTER DATABASE db_name UPDATE 0; - ``` - UPDATE 参数控制是否允许更新数据。缺省值为 0,取值范围为 [0, 1]。0 表示会直接丢弃后写入的相同时间戳的数据;1 表示会使用后写入的数据覆盖已有的相同时间戳的数据。 - - **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.1.0 版本开始,修改这些参数后无需重启服务器即可生效。 + **Tips**: 以上所有参数修改后都可以用show databases来确认是否修改成功。另外,从 2.1.3.0 版本开始,修改这些参数后无需重启服务器即可生效。 - **显示系统所有数据库** @@ -223,7 +208,7 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 说明:可在like中使用通配符进行名称的匹配,这一通配符字符串最长不能超过24字节。 - 通配符匹配:1)’%’ (百分号)匹配0到任意个字符;2)’\_’下划线匹配一个字符。 + 通配符匹配:1)'%'(百分号)匹配0到任意个字符;2)'\_'下划线匹配单个任意字符。 - **显示一个数据表的创建语句** @@ -263,6 +248,14 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ``` 如果表是通过超级表创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构。 +- **表修改列宽** + + ```mysql + ALTER TABLE tb_name MODIFY COLUMN field_name data_type(length); + ``` + 如果数据列的类型是可变长格式(BINARY 或 NCHAR),那么可以使用此指令修改其宽度(只能改大,不能改小)。(2.1.3.0 版本新增) + 如果表是通过超级表创建,更改表结构的操作只能对超级表进行。同时针对超级表的结构更改对所有通过该结构创建的表生效。对于不是通过超级表创建的表,可以直接修改表结构。 + ## 超级表STable管理 注意:在 2.0.15.0 及以后的版本中,开始支持 STABLE 保留字。也即,在本节后文的指令说明中,CREATE、DROP、ALTER 三个指令在老版本中保留字需写作 TABLE 而不是 STABLE。 @@ -276,11 +269,11 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 说明: - 1) TAGS 列的数据类型不能是 timestamp 类型; + 1) TAGS 列的数据类型不能是 timestamp 类型;(从 2.1.3.0 版本开始,TAGS 列中支持使用 timestamp 类型,但需注意在 TAGS 中的 timestamp 列写入数据时需要提供给定值,而暂不支持四则运算,例如 `NOW + 10s` 这类表达式) 2) TAGS 列名不能与其他列名相同; - 3) TAGS 列名不能为预留关键字; + 3) TAGS 列名不能为预留关键字(参见:[参数限制与保留关键字](https://www.taosdata.com/cn/documentation/administrator#keywords) 章节); 4) TAGS 最多允许 128 个,至少 1 个,总长度不超过 16 KB。 @@ -323,6 +316,13 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ALTER STABLE stb_name DROP COLUMN field_name; ``` +- **超级表修改列宽** + + ```mysql + ALTER STABLE stb_name MODIFY COLUMN field_name data_type(length); + ``` + 如果数据列的类型是可变长格式(BINARY 或 NCHAR),那么可以使用此指令修改其宽度(只能改大,不能改小)。(2.1.3.0 版本新增) + ## 超级表 STable 中 TAG 管理 - **添加标签** @@ -346,6 +346,13 @@ 
TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ``` 修改超级表的标签名,从超级表修改某个标签名后,该超级表下的所有子表也会自动更新该标签名。 +- **修改标签列宽度** + + ```mysql + ALTER STABLE stb_name MODIFY TAG tag_name data_type(length); + ``` + 如果标签的类型是可变长格式(BINARY 或 NCHAR),那么可以使用此指令修改其宽度(只能改大,不能改小)。(2.1.3.0 版本新增) + - **修改子表标签值** ```mysql @@ -355,77 +362,82 @@ TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传 ## 数据写入 -- **插入一条记录** - ```mysql - INSERT INTO tb_name VALUES (field_value, ...); - ``` - 向表tb_name中插入一条记录。 +### 写入语法: -- **插入一条记录,数据对应到指定的列** +```mysql +INSERT INTO + tb_name + [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)] + [(field1_name, ...)] + VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path + [tb2_name + [USING stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)] + [(field1_name, ...)] + VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path + ...]; +``` + +### 详细描述及示例: + +- **插入一条或多条记录** + 指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录: ```mysql - INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...); + INSERT INTO d1001 VALUES (NOW, 10.2, 219, 0.32); ``` - 向表tb_name中插入一条记录,数据对应到指定的列。SQL语句中没有出现的列,数据库将自动填充为NULL。主键(时间戳)不能为NULL。 - -- **插入多条记录** + 或者,可以通过如下语句写入两行记录: ```mysql - INSERT INTO tb_name VALUES (field1_value1, ...) (field1_value2, ...) ...; + INSERT INTO d1001 VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32) (1626164208000, 10.15, 217, 0.33); ``` - 向表tb_name中插入多条记录。 - **注意**:在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为now,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。 + **注意:** + 1)在第二个例子中,两行记录的首列时间戳使用了不同格式的写法。其中字符串格式的时间戳写法不受所在 DATABASE 的时间精度设置影响;而长整形格式的时间戳写法会受到所在 DATABASE 的时间精度设置影响——例子中的时间戳在毫秒精度下可以写作 1626164208000,而如果是在微秒精度设置下就需要写为 1626164208000000。 + 2)在使用“插入多条记录”方式写入数据时,不能把第一列的时间戳取值都设为 NOW,否则会导致语句中的多条记录使用相同的时间戳,于是就可能出现相互覆盖以致这些数据行无法全部被正确保存。其原因在于,NOW 函数在执行中会被解析为所在 SQL 语句的实际执行时间,出现在同一语句中的多个 NOW 标记也就会被替换为完全相同的时间戳取值。 + 3)允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的 keep 值(数据保留的天数);允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的 days 值(数据文件存储数据的时间跨度,单位为天)。keep 和 days 都是可以在创建数据库时指定的,缺省值分别是 3650 天和 10 天。 -- **按指定的列插入多条记录** +- **插入记录,数据对应到指定的列** + 向数据子表中插入记录时,无论插入一行还是多行,都可以让数据对应到指定的列。对于 SQL 语句中没有出现的列,数据库将自动填充为 NULL。主键(时间戳)不能为 NULL。例如: ```mysql - INSERT INTO tb_name (field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...; + INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, 0.31); ``` - 向表tb_name中按指定的列插入多条记录。 + **说明:**如果不指定列,也即使用全列模式——那么在 VALUES 部分提供的数据,必须为数据表的每个列都显式地提供数据。全列模式写入速度会远快于指定列,因此建议尽可能采用全列写入方式,此时空列可以填入 NULL。 -- **向多个表插入多条记录** +- **向多个表插入记录** + 可以在一条语句中,分别向多个表插入一条或多条记录,并且也可以在插入过程中指定列。例如: ```mysql - INSERT INTO tb1_name VALUES (field1_value1, ...) (field1_value2, ...) ... - tb2_name VALUES (field1_value1, ...) (field1_value2, ...) ...; + INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) + d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` - 同时向表tb1_name和tb2_name中分别插入多条记录。 -- **同时向多个表按列插入多条记录** +- **插入记录时自动建表** + 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。例如: ```mysql - INSERT INTO tb1_name (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ... - tb2_name (tb2_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) 
...; + INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:32.272', 10.2, 219, 0.32); ``` - 同时向表tb1_name和tb2_name中按列分别插入多条记录。 - - 注意: - 1) 如果时间戳为now,系统将自动使用客户端当前时间作为该记录的时间戳; - 2) 允许插入的最老记录的时间戳,是相对于当前服务器时间,减去配置的keep值(数据保留的天数),允许插入的最新记录的时间戳,是相对于当前服务器时间,加上配置的days值(数据文件存储数据的时间跨度,单位为天)。keep和days都是可以在创建数据库时指定的,缺省值分别是3650天和10天。 - -- **插入记录时自动建表** + 也可以在自动建表时,只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将置为 NULL。例如: ```mysql - INSERT INTO tb_name USING stb_name TAGS (tag_value1, ...) VALUES (field_value1, ...); + INSERT INTO d21001 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:33.196', 10.15, 217, 0.33); ``` - 如果用户在写数据时并不确定某个表是否存在,此时可以在写入数据时使用自动建表语法来创建不存在的表,若该表已存在则不会建立新表。自动建表时,要求必须以超级表为模板,并写明数据表的 TAGS 取值。 - -- **插入记录时自动建表,并指定具体的 TAGS 列** + 自动建表语法也支持在一条语句中向多个表插入记录。例如: ```mysql - INSERT INTO tb_name USING stb_name (tag_name1, ...) TAGS (tag_value1, ...) VALUES (field_value1, ...); + INSERT INTO d21001 USING meters TAGS ('Beijing.Chaoyang', 2) VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) + d21002 USING meters (groupdId) TAGS (2) VALUES ('2021-07-13 14:06:34.255', 10.15, 217, 0.33) + d21003 USING meters (groupdId) TAGS (2) (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` - 在自动建表时,可以只是指定部分 TAGS 列的取值,未被指定的 TAGS 列将取为空值。 + **说明:**在 2.0.20.5 版本之前,在使用自动建表语法并指定列时,子表的列名必须紧跟在子表名称后面,而不能如例子里那样放在 TAGS 和 VALUES 之间。从 2.0.20.5 版本开始,两种写法都可以,但不能在一条 SQL 语句中混用,否则会报语法错误。 -- **同时向多个表按列插入多条记录,自动建表** - ```mysql - INSERT INTO tb1_name (tb1_field1_name, ...) [USING stb1_name TAGS (tag_value1, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ... - tb2_name (tb2_field1_name, ...) [USING stb2_name TAGS (tag_value2, ...)] VALUES (field1_value1, ...) (field1_value2, ...) ...; +- **插入来自文件的数据记录** + 除了使用 VALUES 关键字插入一行或多行数据外,也可以把要写入的数据放在 CSV 文件中(英文逗号分隔、英文单引号括住每个值)供 SQL 指令读取。其中 CSV 文件无需表头。例如,如果 /tmp/csvfile.csv 文件的内容为: ``` - 以自动建表的方式,同时向表tb1_name和tb2_name中按列分别插入多条记录。 - 说明:`(tb1_field1_name, ...)`的部分可以省略掉,这样就是使用全列模式写入——也即在 VALUES 部分提供的数据,必须为数据表的每个列都显式地提供数据。全列写入速度会远快于指定列,因此建议尽可能采用全列写入方式,此时空列可以填入NULL。 - 从 2.0.20.5 版本开始,子表的列名可以不跟在子表名称后面,而是可以放在 TAGS 和 VALUES 之间,例如像下面这样写: + '2021-07-13 14:07:34.630', '10.2', '219', '0.32' + '2021-07-13 14:07:35.779', '10.15', '217', '0.33' + ``` + 那么通过如下指令可以把这个文件中的数据写入子表中: ```mysql - INSERT INTO tb1_name [USING stb1_name TAGS (tag_value1, ...)] (tb1_field1_name, ...) VALUES (field1_value1, ...) (field1_value2, ...) ...; + INSERT INTO d1001 FILE '/tmp/csvfile.csv'; ``` - 注意:虽然两种写法都可以,但并不能在一条 SQL 语句中混用,否则会报语法错误。 **历史记录写入**:可使用IMPORT或者INSERT命令,IMPORT的语法,功能与INSERT完全一样。 -说明:针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分SQL仍会执行。下面的sql中,insert语句是无效的,但是d1001仍会被创建。 +**说明:**针对 insert 类型的 SQL 语句,我们采用的流式解析策略,在发现后面的错误之前,前面正确的部分 SQL 仍会执行。下面的 SQL 中,INSERT 语句是无效的,但是 d1001 仍会被创建。 ```mysql taos> CREATE TABLE meters(ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS(location BINARY(30), groupId INT); @@ -459,9 +471,10 @@ Query OK, 1 row(s) in set (0.001091s) SELECT select_expr [, select_expr ...] 
FROM {tb_name_list} [WHERE where_condition] - [INTERVAL (interval_val [, interval_offset])] - [SLIDING sliding_val] - [FILL fill_val] + [SESSION(ts_col, tol_val)] + [STATE_WINDOW(col)] + [INTERVAL(interval_val [, interval_offset]) [SLIDING sliding_val]] + [FILL(fill_mod_and_val)] [GROUP BY col_list] [ORDER BY col_list { DESC | ASC }] [SLIMIT limit_val [SOFFSET offset_val]] @@ -678,29 +691,31 @@ Query OK, 1 row(s) in set (0.001091s) * 暂不支持含列名的四则运算表达式作为 SQL 函数的应用对象(例如,不支持 `select min(2*a) from t;`,但可以写 `select 2*min(a) from t;`)。 - WHERE 语句可以使用各种逻辑判断来过滤数字值,或使用通配符来过滤字符串。 - 输出结果缺省按首列时间戳升序排序,但可以指定按降序排序( _c0 指首列时间戳)。使用 ORDER BY 对其他字段进行排序为非法操作。 -- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。 +- 参数 LIMIT 控制输出条数,OFFSET 指定从第几条开始输出。LIMIT/OFFSET 对结果集的执行顺序在 ORDER BY 之后。且 `LIMIT 5 OFFSET 2` 可以简写为 `LIMIT 2, 5`。 * 在有 GROUP BY 子句的情况下,LIMIT 参数控制的是每个分组中至多允许输出的条数。 -- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。 +- 参数 SLIMIT 控制由 GROUP BY 指令划分的分组中,至多允许输出几个分组的数据。且 `SLIMIT 5 SOFFSET 2` 可以简写为 `SLIMIT 2, 5`。 - 通过 “>>” 输出结果可以导出到指定文件。 ### 支持的条件过滤操作 -| **Operation** | **Note** | **Applicable Data Types** | -| --------------- | ----------------------------- | ------------------------------------- | -| > | larger than | **`timestamp`** and all numeric types | -| < | smaller than | **`timestamp`** and all numeric types | -| >= | larger than or equal to | **`timestamp`** and all numeric types | -| <= | smaller than or equal to | **`timestamp`** and all numeric types | -| = | equal to | all types | -| <> | not equal to | all types | -| between and | within a certain range | **`timestamp`** and all numeric types | -| % | match with any char sequences | **`binary`** **`nchar`** | -| _ | match with a single char | **`binary`** **`nchar`** | +| **Operation** | **Note** | **Applicable Data Types** | +| --------------- | ----------------------------- | ----------------------------------------- | +| > | larger than | **`timestamp`** and all numeric types | +| < | smaller than | **`timestamp`** and all numeric types | +| >= | larger than or equal to | **`timestamp`** and all numeric types | +| <= | smaller than or equal to | **`timestamp`** and all numeric types | +| = | equal to | all types | +| <> | not equal to | all types | +| between and | within a certain range | **`timestamp`** and all numeric types | +| in | matches any value in a set | all types except first column `timestamp` | +| % | match with any char sequences | **`binary`** **`nchar`** | +| _ | match with a single char | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 -3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。 -4. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 +4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功;TIMESTAMP 类型支持非主键的列。 |<----------- body of column data tuple ------------------->| + * | |<----------------- flen ------------->|<--- value part --->| + * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... 
| value |...|...|... | + * +-----------+----------+----------+--------------------------------------|--------------------| + * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... | + * +-----------+----------+----------+--------------------------------------+--------------------| + * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. + * 2. dataTLen: total length including the header and body. + */ + + if (memRowType == SMEM_ROW_DATA) { + SDataRow trow = (SDataRow)memRowDataBody(memRow); + dataRowSetLen(trow, (TDRowLenT)(TD_DATA_ROW_HEAD_SIZE + pBuilder->flen)); + dataRowSetVersion(trow, pBuilder->sversion); + + p = (char*)payloadBody(pBuilder->buf); + uint16_t i = 0, j = 0; + while (j < nCols) { + if (i >= nColsBound) { + break; + } + int16_t colId = payloadColId(p); + if (colId == pSchema[j].colId) { + // ASSERT(payloadColType(p) == pSchema[j].type); + tdAppendColVal(trow, POINTER_SHIFT(pVals, payloadColOffset(p)), pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + p = payloadNextCol(p); + ++i; + ++j; + } else if (colId < pSchema[j].colId) { + p = payloadNextCol(p); + ++i; + } else { + tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + ++j; + } + } + + while (j < nCols) { + tdAppendColVal(trow, getNullValue(pSchema[j].type), pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + ++j; + } + + #if 0 // no need anymore + while (i < nColsBound) { + p = payloadNextCol(p); + ++i; + } + #endif + + } else if (memRowType == SMEM_ROW_KV) { + SKVRow kvRow = (SKVRow)memRowKvBody(memRow); + kvRowSetLen(kvRow, (TDRowLenT)(TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * nColsBound)); + kvRowSetNCols(kvRow, nColsBound); + memRowSetKvVersion(memRow, pBuilder->sversion); + + p = (char*)payloadBody(pBuilder->buf); + int i = 0; + while (i < nColsBound) { + int16_t colId = payloadColId(p); + uint8_t colType = payloadColType(p); + tdAppendKvColVal(kvRow, POINTER_SHIFT(pVals,payloadColOffset(p)), colId, colType, toffset); + toffset += sizeof(SColIdx); + p = payloadNextCol(p); + ++i; + } + + } else { + ASSERT(0); + } + int32_t rowTLen = memRowTLen(memRow); + pBuilder->pDataBlock = (char*)pBuilder->pDataBlock + rowTLen; // next row + pBuilder->pSubmitBlk->dataLen += rowTLen; + + return memRow; +} + +// Erase the empty space reserved for binary data +static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, SInsertStatementParam* insertParam, + SBlockKeyTuple* blkKeyTuple) { // TODO: optimize this function, handle the case while binary is not presented - STableMeta* pTableMeta = pTableDataBlock->pTableMeta; - STableComInfo tinfo = tscGetTableInfo(pTableMeta); - SSchema* pSchema = tscGetTableSchema(pTableMeta); + STableMeta* pTableMeta = pTableDataBlock->pTableMeta; + STableComInfo tinfo = tscGetTableInfo(pTableMeta); + SSchema* pSchema = tscGetTableSchema(pTableMeta); SSubmitBlk* pBlock = pDataBlock; memcpy(pDataBlock, pTableDataBlock->pData, sizeof(SSubmitBlk)); @@ -1633,7 +1879,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo int32_t flen = 0; // original total length of row // schema needs to be included into the submit data block - if (includeSchema) { + if (insertParam->schemaAttached) { int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta); for(int32_t j = 0; j < numOfCols; ++j) { STColumn* pCol = (STColumn*) pDataBlock; @@ -1659,21 +1905,39 @@ static int 
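The SMEM_ROW_DATA branch of tdGenMemRowFromBuilder above relies on both the payload's bound columns and the table schema being sorted by ascending column ID: it advances two cursors in lock-step, copying the bound value when the IDs match and appending a NULL for any schema column the payload skipped. The standalone sketch below shows only that merge step, using made-up column IDs and plain integer arrays instead of the repo's SMemRow/SSchema types.

```c
#include <stdio.h>

int main(void) {
    /* Hypothetical, already-sorted column IDs: the payload's bound columns and
     * the full table schema, mirroring the loop in tdGenMemRowFromBuilder. */
    int boundIds[]  = {1, 3, 4};
    int schemaIds[] = {1, 2, 3, 4, 5};
    int nBound = 3, nCols = 5;

    int i = 0, j = 0;
    while (j < nCols && i < nBound) {
        if (boundIds[i] == schemaIds[j]) {
            printf("col %d: append bound value\n", schemaIds[j]);
            ++i; ++j;
        } else if (boundIds[i] < schemaIds[j]) {
            ++i;                               /* bound column not in the schema: skip it */
        } else {
            printf("col %d: append NULL\n", schemaIds[j]);  /* schema column missing from payload */
            ++j;
        }
    }
    while (j < nCols) {                        /* remaining schema columns are NULL-filled */
        printf("col %d: append NULL\n", schemaIds[j]);
        ++j;
    }
    return 0;
}
```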
trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo char* p = pTableDataBlock->pData + sizeof(SSubmitBlk); pBlock->dataLen = 0; int32_t numOfRows = htons(pBlock->numOfRows); - - for (int32_t i = 0; i < numOfRows; ++i) { - SDataRow trow = (SDataRow) pDataBlock; - dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen)); - dataRowSetVersion(trow, pTableMeta->sversion); - - int toffset = 0; - for (int32_t j = 0; j < tinfo.numOfColumns; j++) { - tdAppendColVal(trow, p, pSchema[j].type, pSchema[j].bytes, toffset); - toffset += TYPE_BYTES[pSchema[j].type]; - p += pSchema[j].bytes; + + if (IS_RAW_PAYLOAD(insertParam->payloadType)) { + for (int32_t i = 0; i < numOfRows; ++i) { + SMemRow memRow = (SMemRow)pDataBlock; + memRowSetType(memRow, SMEM_ROW_DATA); + SDataRow trow = memRowDataBody(memRow); + dataRowSetLen(trow, (uint16_t)(TD_DATA_ROW_HEAD_SIZE + flen)); + dataRowSetVersion(trow, pTableMeta->sversion); + + int toffset = 0; + for (int32_t j = 0; j < tinfo.numOfColumns; j++) { + tdAppendColVal(trow, p, pSchema[j].type, toffset); + toffset += TYPE_BYTES[pSchema[j].type]; + p += pSchema[j].bytes; + } + + pDataBlock = (char*)pDataBlock + memRowTLen(memRow); + pBlock->dataLen += memRowTLen(memRow); } + } else { + SMemRowBuilder rowBuilder; + rowBuilder.pSchema = pSchema; + rowBuilder.sversion = pTableMeta->sversion; + rowBuilder.flen = flen; + rowBuilder.nCols = tinfo.numOfColumns; + rowBuilder.pDataBlock = pDataBlock; + rowBuilder.pSubmitBlk = pBlock; + rowBuilder.buf = p; - pDataBlock = (char*)pDataBlock + dataRowLen(trow); - pBlock->dataLen += dataRowLen(trow); + for (int32_t i = 0; i < numOfRows; ++i) { + rowBuilder.buf = (blkKeyTuple + i)->payloadAddr; + tdGenMemRowFromBuilder(&rowBuilder); + } } int32_t len = pBlock->dataLen + pBlock->schemaLen; @@ -1684,7 +1948,7 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo } static int32_t getRowExpandSize(STableMeta* pTableMeta) { - int32_t result = TD_DATA_ROW_HEAD_SIZE; + int32_t result = TD_MEM_ROW_DATA_HEAD_SIZE; int32_t columns = tscGetNumOfColumns(pTableMeta); SSchema* pSchema = tscGetTableSchema(pTableMeta); for(int32_t i = 0; i < columns; i++) { @@ -1698,16 +1962,14 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) { static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeBlockMap) { pInsertParam->numOfTables = (int32_t) taosHashGetSize(pInsertParam->pTableBlockHashList); if (pInsertParam->pTableNameList == NULL) { - pInsertParam->pTableNameList = calloc(pInsertParam->numOfTables, POINTER_BYTES); - } else { - memset(pInsertParam->pTableNameList, 0, pInsertParam->numOfTables * POINTER_BYTES); + pInsertParam->pTableNameList = malloc(pInsertParam->numOfTables * POINTER_BYTES); } STableDataBlocks **p1 = taosHashIterate(pInsertParam->pTableBlockHashList, NULL); int32_t i = 0; while(p1) { STableDataBlocks* pBlocks = *p1; - tfree(pInsertParam->pTableNameList[i]); + //tfree(pInsertParam->pTableNameList[i]); pInsertParam->pTableNameList[i++] = tNameDup(&pBlocks->tableName); p1 = taosHashIterate(pInsertParam->pTableBlockHashList, p1); @@ -1719,14 +1981,18 @@ static void extractTableNameList(SInsertStatementParam *pInsertParam, bool freeB } int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBlockMap) { - const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg); - - void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); - SArray* pVnodeDataBlockList = taosArrayInit(8, 
POINTER_BYTES); + const int INSERT_HEAD_SIZE = sizeof(SMsgDesc) + sizeof(SSubmitMsg); + int code = 0; + bool isRawPayload = IS_RAW_PAYLOAD(pInsertParam->payloadType); + void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); STableDataBlocks** p = taosHashIterate(pInsertParam->pTableBlockHashList, NULL); STableDataBlocks* pOneTableBlock = *p; + + SBlockKeyInfo blkKeyInfo = {0}; // share by pOneTableBlock + while(pOneTableBlock) { SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData; if (pBlocks->numOfRows > 0) { @@ -1740,37 +2006,54 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl tscError("0x%"PRIx64" failed to prepare the data block buffer for merging table data, code:%d", pInsertParam->objectId, ret); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); + tfree(blkKeyInfo.pKeyTuple); return ret; } int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); if (dataBuf->nAllocSize < destSize) { - while (dataBuf->nAllocSize < destSize) { - dataBuf->nAllocSize = (uint32_t)(dataBuf->nAllocSize * 1.5); - } + dataBuf->nAllocSize = (uint32_t)(destSize * 1.5); char* tmp = realloc(dataBuf->pData, dataBuf->nAllocSize); if (tmp != NULL) { dataBuf->pData = tmp; - memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); + //memset(dataBuf->pData + dataBuf->size, 0, dataBuf->nAllocSize - dataBuf->size); } else { // failed to allocate memory, free already allocated memory and return error code tscError("0x%"PRIx64" failed to allocate memory for merging submit block, size:%d", pInsertParam->objectId, dataBuf->nAllocSize); taosHashCleanup(pVnodeDataBlockHashList); tscDestroyBlockArrayList(pVnodeDataBlockList); tfree(dataBuf->pData); + tfree(blkKeyInfo.pKeyTuple); return TSDB_CODE_TSC_OUT_OF_MEMORY; } } - tscSortRemoveDataBlockDupRows(pOneTableBlock); - char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize*(pBlocks->numOfRows-1); + if (isRawPayload) { + tscSortRemoveDataBlockDupRowsRaw(pOneTableBlock); + char* ekey = (char*)pBlocks->data + pOneTableBlock->rowSize * (pBlocks->numOfRows - 1); - tscDebug("0x%"PRIx64" name:%s, name:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), - pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); + tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, + pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, + pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey)); + } else { + if ((code = tscSortRemoveDataBlockDupRows(pOneTableBlock, &blkKeyInfo)) != 0) { + taosHashCleanup(pVnodeDataBlockHashList); + tscDestroyBlockArrayList(pVnodeDataBlockList); + tfree(dataBuf->pData); + tfree(blkKeyInfo.pKeyTuple); + return code; + } + ASSERT(blkKeyInfo.pKeyTuple != NULL && pBlocks->numOfRows > 0); + SBlockKeyTuple* pLastKeyTuple = blkKeyInfo.pKeyTuple + pBlocks->numOfRows - 1; + tscDebug("0x%" PRIx64 " name:%s, tid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, + pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, + pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, 
pLastKeyTuple->skey); + } + int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); @@ -1780,7 +2063,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl pBlocks->schemaLen = 0; // erase the empty space reserved for binary data - int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam->schemaAttached); + int32_t finalLen = trimDataBlock(dataBuf->pData + dataBuf->size, pOneTableBlock, pInsertParam, blkKeyInfo.pKeyTuple); assert(finalLen <= len); dataBuf->size += (finalLen + sizeof(SSubmitBlk)); @@ -1808,6 +2091,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl // free the table data blocks; pInsertParam->pDataBlocks = pVnodeDataBlockList; taosHashCleanup(pVnodeDataBlockHashList); + tfree(blkKeyInfo.pKeyTuple); return TSDB_CODE_SUCCESS; } @@ -2048,27 +2332,31 @@ void tscFieldInfoCopy(SFieldInfo* pFieldInfo, const SFieldInfo* pSrc, const SArr SInternalField p = {.visible = pfield->visible, .field = pfield->field}; + bool found = false; int32_t resColId = pfield->pExpr->base.resColId; for(int32_t j = 0; j < numOfExpr; ++j) { SExprInfo* pExpr = taosArrayGetP(pExprList, j); if (pExpr->base.resColId == resColId) { p.pExpr = pExpr; + found = true; break; } } -// p.pExpr = calloc(1, sizeof(SExprInfo)); -// tscExprAssign(p.pExpr, pfield->pExpr); + if (!found) { + assert(pfield->pExpr->pExpr != NULL); + p.pExpr = calloc(1, sizeof(SExprInfo)); + tscExprAssign(p.pExpr, pfield->pExpr); + } + taosArrayPush(pFieldInfo->internalField, &p); } } } -SExprInfo* tscExprCreate(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, +SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, int32_t colType) { - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); - SExprInfo* pExpr = calloc(1, sizeof(SExprInfo)); if (pExpr == NULL) { return NULL; @@ -2090,21 +2378,22 @@ SExprInfo* tscExprCreate(SQueryInfo* pQueryInfo, int16_t functionId, SColumnInde } else if (functionId == TSDB_FUNC_BLKINFO) { p->colInfo.colId = pColIndex->columnIndex; p->colBytes = TSDB_MAX_BINARY_LEN; - p->colType = TSDB_DATA_TYPE_BINARY; + p->colType = TSDB_DATA_TYPE_BINARY; } else { + int32_t len = tListLen(p->colInfo.name); if (TSDB_COL_IS_TAG(colType)) { SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); p->colInfo.colId = pSchema[pColIndex->columnIndex].colId; p->colBytes = pSchema[pColIndex->columnIndex].bytes; p->colType = pSchema[pColIndex->columnIndex].type; - tstrncpy(p->colInfo.name, pSchema[pColIndex->columnIndex].name, sizeof(p->colInfo.name)); + snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema[pColIndex->columnIndex].name); } else if (pTableMetaInfo->pTableMeta != NULL) { // in handling select database/version/server_status(), the pTableMeta is NULL SSchema* pSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pColIndex->columnIndex); p->colInfo.colId = pSchema->colId; p->colBytes = pSchema->bytes; - p->colType = pSchema->type; - tstrncpy(p->colInfo.name, pSchema->name, sizeof(p->colInfo.name)); + p->colType = pSchema->type; + snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema->name); } } @@ -2129,15 +2418,17 @@ SExprInfo* tscExprInsert(SQueryInfo* pQueryInfo, 
int32_t index, int16_t function if (index == num) { return tscExprAppend(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); } - - SExprInfo* pExpr = tscExprCreate(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); + SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayInsert(pQueryInfo->exprList, index, &pExpr); return pExpr; } SExprInfo* tscExprAppend(SQueryInfo* pQueryInfo, int16_t functionId, SColumnIndex* pColIndex, int16_t type, int16_t size, int16_t resColId, int16_t interSize, bool isTagCol) { - SExprInfo* pExpr = tscExprCreate(pQueryInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pColIndex->tableIndex); + SExprInfo* pExpr = tscExprCreate(pTableMetaInfo, functionId, pColIndex, type, size, resColId, interSize, isTagCol); taosArrayPush(pQueryInfo->exprList, &pExpr); return pExpr; } @@ -2254,18 +2545,14 @@ int32_t tscExprCopyAll(SArray* dst, const SArray* src, bool deepcopy) { return 0; } -bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) { - // ignore the tbname columnIndex to be inserted into source list - if (columnIndex < 0) { - return false; - } - +// ignore the tbname columnIndex to be inserted into source list +int32_t tscColumnExists(SArray* pColumnList, int32_t columnId, uint64_t uid) { size_t numOfCols = taosArrayGetSize(pColumnList); int32_t i = 0; while (i < numOfCols) { SColumn* pCol = taosArrayGetP(pColumnList, i); - if ((pCol->columnIndex != columnIndex) || (pCol->tableUid != uid)) { + if ((pCol->info.colId != columnId) || (pCol->tableUid != uid)) { ++i; continue; } else { @@ -2274,10 +2561,10 @@ bool tscColumnExists(SArray* pColumnList, int32_t columnIndex, uint64_t uid) { } if (i >= numOfCols || numOfCols == 0) { - return false; + return -1; } - return true; + return i; } void tscExprAssign(SExprInfo* dst, const SExprInfo* src) { @@ -2363,13 +2650,7 @@ SColumn* tscColumnClone(const SColumn* src) { return NULL; } - dst->columnIndex = src->columnIndex; - dst->tableUid = src->tableUid; - dst->info.flist.numOfFilters = src->info.flist.numOfFilters; - dst->info.flist.filterInfo = tFilterInfoDup(src->info.flist.filterInfo, src->info.flist.numOfFilters); - dst->info.type = src->info.type; - dst->info.colId = src->info.colId; - dst->info.bytes = src->info.bytes; + tscColumnCopy(dst, src); return dst; } @@ -2378,6 +2659,18 @@ static void tscColumnDestroy(SColumn* pCol) { free(pCol); } +void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) { + destroyFilterInfo(&pDest->info.flist); + + pDest->columnIndex = pSrc->columnIndex; + pDest->tableUid = pSrc->tableUid; + pDest->info.flist.numOfFilters = pSrc->info.flist.numOfFilters; + pDest->info.flist.filterInfo = tFilterInfoDup(pSrc->info.flist.filterInfo, pSrc->info.flist.numOfFilters); + pDest->info.type = pSrc->info.type; + pDest->info.colId = pSrc->info.colId; + pDest->info.bytes = pSrc->info.bytes; +} + void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) { assert(src != NULL && dst != NULL); @@ -2835,6 +3128,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) { int32_t tscAddQueryInfo(SSqlCmd* pCmd) { assert(pCmd != NULL); SQueryInfo* pQueryInfo = calloc(1, sizeof(SQueryInfo)); + if (pQueryInfo == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } @@ -2913,13 +3207,17 @@ int32_t 
tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { pQueryInfo->tsBuf = NULL; pQueryInfo->fillType = pSrc->fillType; pQueryInfo->fillVal = NULL; + pQueryInfo->numOfFillVal = 0;; pQueryInfo->clauseLimit = pSrc->clauseLimit; + pQueryInfo->prjOffset = pSrc->prjOffset; pQueryInfo->numOfTables = 0; pQueryInfo->window = pSrc->window; pQueryInfo->sessionWindow = pSrc->sessionWindow; pQueryInfo->pTableMetaInfo = NULL; pQueryInfo->bufLen = pSrc->bufLen; + pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery; + pQueryInfo->arithmeticOnAgg = pSrc->arithmeticOnAgg; pQueryInfo->buf = malloc(pSrc->bufLen); if (pQueryInfo->buf == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2945,11 +3243,12 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { } if (pSrc->fillType != TSDB_FILL_NONE) { - pQueryInfo->fillVal = malloc(pSrc->fieldsInfo.numOfOutput * sizeof(int64_t)); + pQueryInfo->fillVal = calloc(1, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t)); if (pQueryInfo->fillVal == NULL) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } + pQueryInfo->numOfFillVal = pSrc->fieldsInfo.numOfOutput; memcpy(pQueryInfo->fillVal, pSrc->fillVal, pSrc->fieldsInfo.numOfOutput * sizeof(int64_t)); } @@ -2959,6 +3258,14 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { goto _error; } + if (pQueryInfo->arithmeticOnAgg) { + pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES); + if (tscExprCopyAll(pQueryInfo->exprList1, pSrc->exprList1, true) != 0) { + code = TSDB_CODE_TSC_OUT_OF_MEMORY; + goto _error; + } + } + tscColumnListCopyAll(pQueryInfo->colList, pSrc->colList); tscFieldInfoCopy(&pQueryInfo->fieldsInfo, &pSrc->fieldsInfo, pQueryInfo->exprList); @@ -2973,6 +3280,14 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) { tscAddTableMetaInfo(pQueryInfo, &p1->name, pMeta, p1->vgroupList, p1->tagColList, p1->pVgroupTables); } + SArray *pUdfInfo = NULL; + if (pSrc->pUdfInfo) { + pUdfInfo = taosArrayDup(pSrc->pUdfInfo); + } + + pQueryInfo->pUdfInfo = pUdfInfo; + pQueryInfo->udfCopy = true; + _error: return code; } @@ -3019,7 +3334,9 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo) { info->vgInfo.epAddr[j].fqdn = strdup(pInfo->vgInfo.epAddr[j].fqdn); } - info->itemList = taosArrayDup(pInfo->itemList); + if (pInfo->itemList) { + info->itemList = taosArrayDup(pInfo->itemList); + } } SArray* tscVgroupTableInfoDup(SArray* pVgroupTables) { @@ -3244,6 +3561,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNew->pTscObj = pSql->pTscObj; pNew->signature = pNew; pNew->sqlstr = strdup(pSql->sqlstr); + tsem_init(&pNew->rspSem, 0, 0); SSqlCmd* pnCmd = &pNew->cmd; memcpy(pnCmd, pCmd, sizeof(SSqlCmd)); @@ -3259,6 +3577,9 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pnCmd->insertParam.numOfTables = 0; pnCmd->insertParam.pTableNameList = NULL; pnCmd->insertParam.pTableBlockHashList = NULL; + pnCmd->insertParam.objectId = pNew->self; + + memset(&pnCmd->insertParam.tagData, 0, sizeof(STagData)); if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -3267,6 +3588,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t SQueryInfo* pNewQueryInfo = tscGetQueryInfo(pnCmd); + if (pQueryInfo->pUdfInfo) { + pNewQueryInfo->pUdfInfo = taosArrayDup(pQueryInfo->pUdfInfo); + pNewQueryInfo->udfCopy = true; + } + pNewQueryInfo->command = pQueryInfo->command; pnCmd->active = pNewQueryInfo; @@ 
-3280,10 +3606,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pNewQueryInfo->tsBuf = NULL; pNewQueryInfo->fillType = pQueryInfo->fillType; pNewQueryInfo->fillVal = NULL; + pNewQueryInfo->numOfFillVal = 0; pNewQueryInfo->clauseLimit = pQueryInfo->clauseLimit; + pNewQueryInfo->prjOffset = pQueryInfo->prjOffset; pNewQueryInfo->numOfTables = 0; pNewQueryInfo->pTableMetaInfo = NULL; pNewQueryInfo->bufLen = pQueryInfo->bufLen; + pNewQueryInfo->buf = malloc(pQueryInfo->bufLen); if (pNewQueryInfo->buf == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -3309,11 +3638,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t } if (pQueryInfo->fillType != TSDB_FILL_NONE) { - pNewQueryInfo->fillVal = malloc(pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); + //just make memory memory sanitizer happy + //refator later + pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); if (pNewQueryInfo->fillVal == NULL) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } + pNewQueryInfo->numOfFillVal = pQueryInfo->fieldsInfo.numOfOutput; memcpy(pNewQueryInfo->fillVal, pQueryInfo->fillVal, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); } @@ -3477,6 +3809,7 @@ static void tscSubqueryRetrieveCallback(void* param, TAOS_RES* tres, int code) { if (pSql->res.code == TSDB_CODE_SUCCESS) { (*pSql->fp)(pParentSql->param, pParentSql, pParentSql->res.numOfRows); } else { + pParentSql->res.code = pSql->res.code; tscAsyncResultOnError(pParentSql); } } @@ -3502,6 +3835,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pSql->pSubs = calloc(pSql->subState.numOfSub, POINTER_BYTES); pSql->subState.states = calloc(pSql->subState.numOfSub, sizeof(int8_t)); + pthread_mutex_init(&pSql->subState.mutex, NULL); for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { SQueryInfo* pSub = taosArrayGetP(pQueryInfo->pUpstream, i); @@ -3520,6 +3854,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) { pNew->signature = pNew; pNew->sqlstr = strdup(pSql->sqlstr); // todo refactor pNew->fp = tscSubqueryCompleteCallback; + tsem_init(&pNew->rspSem, 0, 0); SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id ps->pParentSql = pSql; @@ -3587,7 +3922,7 @@ bool tscIsUpdateQuery(SSqlObj* pSql) { } SSqlCmd* pCmd = &pSql->cmd; - return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_USE_DB == pCmd->command); + return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_RESET_CACHE == pCmd->command || TSDB_SQL_USE_DB == pCmd->command); } char* tscGetSqlStr(SSqlObj* pSql) { @@ -3777,17 +4112,21 @@ void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp) { //backup the total number of result first int64_t num = pRes->numOfTotal + pRes->numOfClauseTotal; - // DON't free final since it may be recoreded and used later in APP TAOS_FIELD* finalBk = pRes->final; pRes->final = NULL; tscFreeSqlResult(pSql); + pRes->final = finalBk; - pRes->numOfTotal = num; - + + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + taos_free_result(pSql->pSubs[i]); + } + tfree(pSql->pSubs); pSql->subState.numOfSub = 0; + pSql->fp = fp; tscDebug("0x%"PRIx64" try data in the next subclause", pSql->self); @@ -4048,7 +4387,7 @@ STableMeta* tscTableMetaDup(STableMeta* pTableMeta) { assert(pTableMeta != NULL); size_t size = tscGetTableMetaSize(pTableMeta); - STableMeta* p = calloc(1, size); + STableMeta* p = malloc(size); memcpy(p, 
pTableMeta, size); return p; } @@ -4058,7 +4397,10 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) { size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo* pInfo = calloc(1, size); - memcpy(pInfo, pVgroupsInfo, size); + pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups; + for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) { + tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); + } return pInfo; } @@ -4117,7 +4459,7 @@ int32_t createProjectionExpr(SQueryInfo* pQueryInfo, STableMetaInfo* pTableMetaI int32_t inter = 0; getResultDataInfo(pSource->base.colType, pSource->base.colBytes, functionId, 0, &pse->resType, - &pse->resBytes, &inter, 0, false); + &pse->resBytes, &inter, 0, false, NULL); pse->colType = pse->resType; pse->colBytes = pse->resBytes; @@ -4184,8 +4526,14 @@ static int32_t createGlobalAggregateExpr(SQueryAttr* pQueryAttr, SQueryInfo* pQu functionId = TSDB_FUNC_STDDEV; } + SUdfInfo* pUdfInfo = NULL; + + if (functionId < 0) { + pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, -1 * functionId - 1); + } + getResultDataInfo(pExpr->base.colType, pExpr->base.colBytes, functionId, 0, &pse->resType, &pse->resBytes, &inter, - 0, false); + 0, false, pUdfInfo); } } @@ -4232,6 +4580,22 @@ static int32_t createTagColumnInfo(SQueryAttr* pQueryAttr, SQueryInfo* pQueryInf return TSDB_CODE_SUCCESS; } +int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) { + int16_t numOfCols = (int16_t)taosArrayGetSize(pQueryInfo->colList); + int32_t len = 0; + + for(int32_t i = 0; i < numOfCols; ++i) { + SColumn* pCol = taosArrayGetP(pQueryInfo->colList, i); + for (int32_t j = 0; j < pCol->info.flist.numOfFilters; ++j) { + len += sizeof(SColumnFilterInfo); + if (pCol->info.flist.filterInfo[j].filterstr) { + len += (int32_t)pCol->info.flist.filterInfo[j].len + 1 * TSDB_NCHAR_SIZE; + } + } + } + return len; +} + int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) { memset(pQueryAttr, 0, sizeof(SQueryAttr)); @@ -4250,7 +4614,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo); pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo); pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo); - pQueryAttr->distinctTag = pQueryInfo->distinctTag; + pQueryAttr->distinct = pQueryInfo->distinct; pQueryAttr->sw = pQueryInfo->sessionWindow; pQueryAttr->stateWindow = pQueryInfo->stateWindow; @@ -4261,7 +4625,8 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->order = pQueryInfo->order; pQueryAttr->fillType = pQueryInfo->fillType; pQueryAttr->havingNum = pQueryInfo->havingFieldNum; - + pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo; + if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor pQueryAttr->window = pQueryInfo->window; } else { @@ -4327,7 +4692,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt if (pQueryAttr->fillType != TSDB_FILL_NONE) { pQueryAttr->fillVal = calloc(pQueryAttr->numOfOutput, sizeof(int64_t)); - memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryAttr->numOfOutput * sizeof(int64_t)); + memcpy(pQueryAttr->fillVal, pQueryInfo->fillVal, pQueryInfo->numOfFillVal * sizeof(int64_t)); } pQueryAttr->srcRowSize = 0; @@ -4373,8 +4738,13 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq strncpy(tablename, *str, TSDB_TABLE_FNAME_LEN); len = (int32_t) 
strlen(tablename); } else { - memcpy(tablename, *str, nextStr - (*str)); len = (int32_t)(nextStr - (*str)); + if (len >= TSDB_TABLE_NAME_LEN) { + sprintf(pCmd->payload, "table name too long"); + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + memcpy(tablename, *str, nextStr - (*str)); tablename[len] = '\0'; } @@ -4386,9 +4756,8 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq // Check if the table name available or not if (tscValidateName(&sToken) != TSDB_CODE_SUCCESS) { - code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; sprintf(pCmd->payload, "table name is invalid"); - return code; + return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; } SName name = {0}; @@ -4404,6 +4773,20 @@ static int32_t doAddTableName(char* nextStr, char** str, SArray* pNameArray, SSq return TSDB_CODE_SUCCESS; } +int32_t nameComparFn(const void* n1, const void* n2) { + int32_t ret = strcmp(*(char**)n1, *(char**)n2); + if (ret == 0) { + return 0; + } else { + return ret > 0? 1:-1; + } +} + +static void freeContent(void* p) { + char* ptr = *(char**)p; + tfree(ptr); +} + int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t length, SArray* pNameArray) { SSqlCmd *pCmd = &pSql->cmd; @@ -4439,12 +4822,19 @@ int tscTransferTableNameList(SSqlObj *pSql, const char *pNameList, int32_t lengt } } - if (taosArrayGetSize(pNameArray) > TSDB_MULTI_TABLEMETA_MAX_NUM) { + size_t len = taosArrayGetSize(pNameArray); + if (len == 1) { + return TSDB_CODE_SUCCESS; + } + + if (len > TSDB_MULTI_TABLEMETA_MAX_NUM) { code = TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH; sprintf(pCmd->payload, "tables over the max number"); return code; } + taosArraySort(pNameArray, nameComparFn); + taosArrayRemoveDuplicate(pNameArray, nameComparFn, freeContent); return TSDB_CODE_SUCCESS; } diff --git a/src/client/tests/CMakeLists.txt b/src/client/tests/CMakeLists.txt index 1a6c45aadef1989253c661c21a1d39f0f30fd1be..24bfb44ac90e11e01ba99423aa68bd5a9511f746 100644 --- a/src/client/tests/CMakeLists.txt +++ b/src/client/tests/CMakeLists.txt @@ -1,10 +1,11 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) -FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) +FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) +FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) -IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) +IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build unit test") # GoogleTest requires at least C++11 @@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) ADD_EXECUTABLE(cliTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(cliTest taos tutil common gtest pthread) -ENDIF() \ No newline at end of file +ENDIF() diff --git a/src/client/tests/cliTest.cpp b/src/client/tests/cliTest.cpp index 30f248b5418b54b1be26dfcf15348b03fd70af4d..c2799c7ff6cf75017fc4aacfca04caddf6189389 100644 --- a/src/client/tests/cliTest.cpp +++ b/src/client/tests/cliTest.cpp @@ -1,5 +1,6 @@ #include #include +#include #include "taos.h" #include "tglobal.h" @@ -132,7 +133,7 @@ void validateResultFields() { taos_free_result(res); char sql[512] = {0}; - sprintf(sql, "insert into t1 values(%ld, 99, 'abc', 'test')", start_ts); + sprintf(sql, "insert into t1 values(%" PRId64 ", 99, 'abc', 'test')", start_ts); res = taos_query(conn, sql); 
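Earlier in this patch, tscTransferTableNameList sorts the parsed table-name list with nameComparFn and then strips duplicates via taosArrayRemoveDuplicate, releasing each dropped name through freeContent. A minimal standalone sketch of the same sort-then-adjacent-dedup idea, using qsort over a plain char* array (the table names here are made up; taosArray itself is not used):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Comparator in the spirit of nameComparFn, but over a plain char* array. */
static int cmpName(const void *a, const void *b) {
    return strcmp(*(char *const *)a, *(char *const *)b);
}

int main(void) {
    /* Hypothetical table names as a user might pass them, duplicates included. */
    char *names[] = { strdup("d1001"), strdup("t.meters"), strdup("d1001"), strdup("d0003") };
    size_t n = sizeof(names) / sizeof(names[0]);

    qsort(names, n, sizeof(char *), cmpName);          /* sort ... */

    size_t w = 0;                                      /* ... then drop adjacent duplicates */
    for (size_t r = 0; r < n; ++r) {
        if (w > 0 && strcmp(names[w - 1], names[r]) == 0) {
            free(names[r]);                            /* duplicate: release it, as freeContent() does */
        } else {
            names[w++] = names[r];
        }
    }

    for (size_t k = 0; k < w; ++k) {
        printf("%s\n", names[k]);
        free(names[k]);
    }
    return 0;
}
```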
ASSERT_EQ(taos_errno(res), 0); diff --git a/src/client/tests/timeParseTest.cpp b/src/client/tests/timeParseTest.cpp index 692398e3b7329b31ac02d4c36635000d4721d8fc..f6de5d46a7c569ae5abe38f7e9cd2b6dc56a5586 100644 --- a/src/client/tests/timeParseTest.cpp +++ b/src/client/tests/timeParseTest.cpp @@ -1,8 +1,10 @@ -#include "os.h" + #include #include #include +#include +#include "os.h" #include "taos.h" #include "ttoken.h" #include "tutil.h" @@ -15,10 +17,10 @@ int main(int argc, char** argv) { extern void deltaToUtcInitOnce(); /* test parse time function */ TEST(testCase, parse_time) { - + taos_options(TSDB_OPTION_TIMEZONE, "GMT-8"); deltaToUtcInitOnce(); - + char t1[] = "2018-1-1 1:1:1.952798"; char t13[] = "1970-1-1 0:0:0"; @@ -77,15 +79,15 @@ TEST(testCase, parse_time) { taosParseTime(t12, &time1, strlen(t12), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, time1); - taos_options(TSDB_OPTION_TIMEZONE, "UTC"); + taos_options(TSDB_OPTION_TIMEZONE, "UTC"); deltaToUtcInitOnce(); - + taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 0); taos_options(TSDB_OPTION_TIMEZONE, "Asia/Shanghai"); deltaToUtcInitOnce(); - + char t14[] = "1970-1-1T0:0:0Z"; taosParseTime(t14, &time, strlen(t14), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 0); @@ -98,7 +100,7 @@ TEST(testCase, parse_time) { taosParseTime(t41, &time, strlen(t41), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 852048000999); -// int64_t k = timezone; + // int64_t k = timezone; char t42[] = "1997-1-1T0:0:0.999999999Z"; taosParseTime(t42, &time, strlen(t42), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 852048000999 - timezone * MILLISECOND_PER_SECOND); @@ -135,7 +137,7 @@ TEST(testCase, parse_time) { //======================== add some case ============================// - + char b1[] = "9999-12-31 23:59:59.999"; taosParseTime(b1, &time, strlen(b1), TSDB_TIME_PRECISION_MILLI,0); EXPECT_EQ(time, 253402271999999); @@ -145,27 +147,27 @@ TEST(testCase, parse_time) { taosParseTime(b2, &time, strlen(b2), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 1577811661321); - taos_options(TSDB_OPTION_TIMEZONE, "America/New_York"); + taos_options(TSDB_OPTION_TIMEZONE, "America/New_York"); deltaToUtcInitOnce(); - + taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, 18000 * MILLISECOND_PER_SECOND); - taos_options(TSDB_OPTION_TIMEZONE, "Asia/Tokyo"); + taos_options(TSDB_OPTION_TIMEZONE, "Asia/Tokyo"); deltaToUtcInitOnce(); - + taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, -32400 * MILLISECOND_PER_SECOND); taos_options(TSDB_OPTION_TIMEZONE, "Asia/Shanghai"); deltaToUtcInitOnce(); - + taosParseTime(t13, &time, strlen(t13), TSDB_TIME_PRECISION_MILLI, 0); EXPECT_EQ(time, -28800 * MILLISECOND_PER_SECOND); char t[] = "2021-01-08T02:11:40.000+00:00"; taosParseTime(t, &time, strlen(t), TSDB_TIME_PRECISION_MILLI, 0); - printf("%ld\n", time); + printf("%" PRId64 "\n", time); } diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 0da7bda994db83882e36e9d52a7983635ad85330..4dce63e54f5db2b56c569bf6564899236c24a421 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index 4331ddbf8923ee758ebaf3116740db8e24df29a9..bd2c2e46f8546eb74bb32f1ef007aada14a397a0 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h 
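Both test files above switch from %ld to the PRId64 macro when formatting int64_t values; %ld only happens to be correct where long is 64 bits, so the old form breaks on 32-bit and LLP64 (Windows) builds. A small self-contained illustration of the portable form, with an example timestamp value:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void) {
    int64_t start_ts = 1626164208000LL;   /* example epoch-millisecond timestamp */

    /* "%ld" assumes int64_t == long, which fails on 32-bit and LLP64 targets;
     * PRId64 expands to the right conversion specifier for int64_t everywhere. */
    printf("%" PRId64 "\n", start_ts);

    char sql[512];
    snprintf(sql, sizeof(sql), "insert into t1 values(%" PRId64 ", 99, 'abc', 'test')", start_ts);
    printf("%s\n", sql);
    return 0;
}
```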
@@ -41,8 +41,10 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_TABLE, "create-table" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_FUNCTION, "create-function" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_DB, "drop-db" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_TABLE, "drop-table" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_FUNCTION, "drop-function" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_ACCT, "create-acct" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_USER, "create-user" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_ACCT, "drop-acct" ) @@ -74,6 +76,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_STABLEVGROUP, "stable-vgroup" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MULTI_META, "multi-meta" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_HB, "heart-beat" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_FUNC, "retrieve-function" ) // SQL below for client local TSDB_DEFINE_SQL_TYPE( TSDB_SQL_LOCAL, "local" ) diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h index 8ee73291566afcbf604b607c0bc6a426ca372b87..829e771c70d687d1dd9a2f0f80b888a050a23329 100644 --- a/src/common/inc/tdataformat.h +++ b/src/common/inc/tdataformat.h @@ -31,6 +31,14 @@ extern "C" { memcpy(varDataVal(x), (str), __len); \ } while (0); +#define STR_TO_NET_VARSTR(x, str) \ + do { \ + VarDataLenT __len = (VarDataLenT)strlen(str); \ + *(VarDataLenT *)(x) = htons(__len); \ + memcpy(varDataVal(x), (str), __len); \ + } while (0); + + #define STR_WITH_MAXSIZE_TO_VARSTR(x, str, _maxs) \ do { \ char *_e = stpncpy(varDataVal(x), (str), (_maxs)-VARSTR_HEADER_SIZE); \ @@ -45,10 +53,10 @@ extern "C" { // ----------------- TSDB COLUMN DEFINITION typedef struct { - int8_t type; // Column type - int16_t colId; // column ID - int16_t bytes; // column bytes - int16_t offset; // point offset in SDataRow after the header part + int8_t type; // Column type + int16_t colId; // column ID + uint16_t bytes; // column bytes + uint16_t offset; // point offset in SDataRow after the header part. 
} STColumn; #define colType(col) ((col)->type) @@ -159,10 +167,11 @@ static FORCE_INLINE int tkeyComparFn(const void *tkey1, const void *tkey2) { return 0; } } + // ----------------- Data row structure /* A data row, the format is like below: - * |<--------------------+--------------------------- len ---------------------------------->| + * |<------------------------------------------------ len ---------------------------------->| * |<-- Head -->|<--------- flen -------------->| | * +---------------------+---------------------------------+---------------------------------+ * | uint16_t | int16_t | | | @@ -176,8 +185,8 @@ typedef void *SDataRow; #define TD_DATA_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) -#define dataRowLen(r) (*(uint16_t *)(r)) -#define dataRowVersion(r) *(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)) +#define dataRowLen(r) (*(TDRowLenT *)(r)) // 0~65535 +#define dataRowVersion(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) #define dataRowTuple(r) POINTER_SHIFT(r, TD_DATA_ROW_HEAD_SIZE) #define dataRowTKey(r) (*(TKEY *)(dataRowTuple(r))) #define dataRowKey(r) tdGetKey(dataRowTKey(r)) @@ -193,20 +202,19 @@ void tdInitDataRow(SDataRow row, STSchema *pSchema); SDataRow tdDataRowDup(SDataRow row); // offset here not include dataRow header length -static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset) { +static FORCE_INLINE int tdAppendColVal(SDataRow row, const void *value, int8_t type, int32_t offset) { ASSERT(value != NULL); int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE; - char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row)); if (IS_VAR_DATA_TYPE(type)) { *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row); - memcpy(ptr, value, varDataTLen(value)); + memcpy(POINTER_SHIFT(row, dataRowLen(row)), value, varDataTLen(value)); dataRowLen(row) += varDataTLen(value); } else { if (offset == 0) { ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); TKEY tvalue = tdGetTKEY(*(TSKEY *)value); - memcpy(POINTER_SHIFT(row, toffset), (void *)(&tvalue), TYPE_BYTES[type]); + memcpy(POINTER_SHIFT(row, toffset), (const void *)(&tvalue), TYPE_BYTES[type]); } else { memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]); } @@ -237,17 +245,21 @@ typedef struct SDataCol { TSKEY ts; // only used in last NULL column } SDataCol; +#define isAllRowsNull(pCol) ((pCol)->len == 0) static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; } void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints); -void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints); +void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints); void dataColSetOffset(SDataCol *pCol, int nEle); bool isNEleNull(SDataCol *pCol, int nEle); void dataColSetNEleNull(SDataCol *pCol, int nEle, int maxPoints); // Get the data pointer from a column-wised data -static FORCE_INLINE void *tdGetColDataOfRow(SDataCol *pCol, int row) { +static FORCE_INLINE const void *tdGetColDataOfRow(SDataCol *pCol, int row) { + if (isAllRowsNull(pCol)) { + return getNullValue(pCol->type); + } if (IS_VAR_DATA_TYPE(pCol->type)) { return POINTER_SHIFT(pCol->pData, pCol->dataOff[row]); } else { @@ -279,7 +291,7 @@ typedef struct { } SDataCols; #define keyCol(pCols) (&((pCols)->cols[0])) // Key column -#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] +#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)] // the idx row of column-wised data #define 
dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx)) static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) { if (pCols->numOfRows) { @@ -289,6 +301,11 @@ static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) { } } +static FORCE_INLINE TSKEY dataColsKeyAtRow(SDataCols *pCols, int row) { + ASSERT(row < pCols->numOfRows); + return dataColsKeyAt(pCols, row); +} + static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) { if (pCols->numOfRows) { return dataColsKeyAt(pCols, 0); @@ -318,13 +335,13 @@ void tdResetDataCols(SDataCols *pCols); int tdInitDataCols(SDataCols *pCols, STSchema *pSchema); SDataCols *tdDupDataCols(SDataCols *pCols, bool keepData); SDataCols *tdFreeDataCols(SDataCols *pCols); -void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols); int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset); // ----------------- K-V data row structure -/* +/* |<-------------------------------------- len -------------------------------------------->| + * |<----- header ----->|<--------------------------- body -------------------------------->| * +----------+----------+---------------------------------+---------------------------------+ - * | int16_t | int16_t | | | + * | uint16_t | int16_t | | | * +----------+----------+---------------------------------+---------------------------------+ * | len | ncols | cols index | data part | * +----------+----------+---------------------------------+---------------------------------+ @@ -332,14 +349,14 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge typedef void *SKVRow; typedef struct { - int16_t colId; - int16_t offset; + int16_t colId; + uint16_t offset; } SColIdx; -#define TD_KV_ROW_HEAD_SIZE (2 * sizeof(int16_t)) +#define TD_KV_ROW_HEAD_SIZE (sizeof(uint16_t) + sizeof(int16_t)) -#define kvRowLen(r) (*(int16_t *)(r)) -#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t))) +#define kvRowLen(r) (*(TDRowLenT *)(r)) +#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(uint16_t))) #define kvRowSetLen(r, len) kvRowLen(r) = (len) #define kvRowSetNCols(r, n) kvRowNCols(r) = (n) #define kvRowColIdx(r) (SColIdx *)POINTER_SHIFT(r, TD_KV_ROW_HEAD_SIZE) @@ -349,6 +366,9 @@ typedef struct { #define kvRowColIdxAt(r, i) (kvRowColIdx(r) + (i)) #define kvRowFree(r) tfree(r) #define kvRowEnd(r) POINTER_SHIFT(r, kvRowLen(r)) +#define kvRowTKey(r) (*(TKEY *)(kvRowValues(r))) +#define kvRowKey(r) tdGetKey(kvRowTKey(r)) +#define kvRowDeleted(r) TKEY_IS_DELETED(kvRowTKey(r)) SKVRow tdKVRowDup(SKVRow row); int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value); @@ -372,13 +392,44 @@ static FORCE_INLINE void *tdGetKVRowValOfCol(SKVRow row, int16_t colId) { return kvRowColVal(row, (SColIdx *)ret); } +static FORCE_INLINE void *tdGetKVRowIdxOfCol(SKVRow row, int16_t colId) { + return taosbsearch(&colId, kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), comparTagId, TD_EQ); +} + +// offset here not include kvRow header length +static FORCE_INLINE int tdAppendKvColVal(SKVRow row, const void *value, int16_t colId, int8_t type, int32_t offset) { + ASSERT(value != NULL); + int32_t toffset = offset + TD_KV_ROW_HEAD_SIZE; + SColIdx *pColIdx = (SColIdx *)POINTER_SHIFT(row, toffset); + char * ptr = (char *)POINTER_SHIFT(row, kvRowLen(row)); + + pColIdx->colId = colId; + pColIdx->offset = kvRowLen(row); // offset of pColIdx including the TD_KV_ROW_HEAD_SIZE + + if (IS_VAR_DATA_TYPE(type)) { + memcpy(ptr, value, varDataTLen(value)); 
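      /* For variable-length types the value already carries its own VarDataLenT length
       * header, so varDataTLen(value) bytes go to the current row tail and kvRowLen grows
       * by the same amount just below.  The SColIdx written above recorded exactly that
       * tail position; e.g. a caller such as tdGenMemRowFromBuilder that binds 3 columns
       * starts kvRowLen at TD_KV_ROW_HEAD_SIZE + 3 * sizeof(SColIdx) = 4 + 12 = 16, so the
       * first appended value lands at byte 16 and that is the offset its SColIdx records. */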
+ kvRowLen(row) += varDataTLen(value); + } else { + if (offset == 0) { + ASSERT(type == TSDB_DATA_TYPE_TIMESTAMP); + TKEY tvalue = tdGetTKEY(*(TSKEY *)value); + memcpy(ptr, (void *)(&tvalue), TYPE_BYTES[type]); + } else { + memcpy(ptr, value, TYPE_BYTES[type]); + } + kvRowLen(row) += TYPE_BYTES[type]; + } + + return 0; +} + // ----------------- K-V data row builder typedef struct { int16_t tCols; int16_t nCols; SColIdx *pColIdx; - int16_t alloc; - int16_t size; + uint16_t alloc; + uint16_t size; void * buf; } SKVRowBuilder; @@ -414,8 +465,146 @@ static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, return 0; } +// ----------------- SMemRow appended with sequential data row structure +/* + * |---------|------------------------------------------------- len ---------------------------------->| + * |<-------- Head ------>|<--------- flen -------------->| | + * |---------+---------------------+---------------------------------+---------------------------------+ + * | uint8_t | uint16_t | int16_t | | | + * |---------+----------+----------+---------------------------------+---------------------------------+ + * | flag | len | sversion | First part | Second part | + * +---------+----------+----------+---------------------------------+---------------------------------+ + * + * NOTE: timestamp in this row structure is TKEY instead of TSKEY + */ + +// ----------------- SMemRow appended with extended K-V data row structure +/* |--------------------|------------------------------------------------ len ---------------------------------->| + * |<------------- Head ------------>|<--------- flen -------------->| | + * |--------------------+----------+--------------------------------------------+---------------------------------+ + * | uint8_t | int16_t | uint16_t | int16_t | | | + * |---------+----------+----------+----------+---------------------------------+---------------------------------+ + * | flag | sversion | len | ncols | cols index | data part | + * |---------+----------+----------+----------+---------------------------------+---------------------------------+ + */ + +typedef void *SMemRow; + +#define TD_MEM_ROW_TYPE_SIZE sizeof(uint8_t) +#define TD_MEM_ROW_KV_VER_SIZE sizeof(int16_t) +#define TD_MEM_ROW_KV_TYPE_VER_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE) +#define TD_MEM_ROW_DATA_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_DATA_ROW_HEAD_SIZE) +// #define TD_MEM_ROW_KV_HEAD_SIZE (TD_MEM_ROW_TYPE_SIZE + TD_MEM_ROW_KV_VER_SIZE + TD_KV_ROW_HEAD_SIZE) + +#define SMEM_ROW_DATA 0U // SDataRow +#define SMEM_ROW_KV 1U // SKVRow + +#define memRowType(r) (*(uint8_t *)(r)) +#define isDataRow(r) (SMEM_ROW_DATA == memRowType(r)) +#define isKvRow(r) (SMEM_ROW_KV == memRowType(r)) + +#define memRowDataBody(r) POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE) // section after flag +#define memRowKvBody(r) \ + POINTER_SHIFT(r, TD_MEM_ROW_KV_TYPE_VER_SIZE) // section after flag + sversion as to reuse SKVRow + +#define memRowDataLen(r) (*(TDRowLenT *)memRowDataBody(r)) // 0~65535 +#define memRowKvLen(r) (*(TDRowLenT *)memRowKvBody(r)) // 0~65535 + +#define memRowDataTLen(r) \ + ((TDRowTLenT)(memRowDataLen(r) + TD_MEM_ROW_TYPE_SIZE)) // using uint32_t/int32_t to store the TLen + +#define memRowKvTLen(r) ((TDRowTLenT)(memRowKvLen(r) + TD_MEM_ROW_KV_TYPE_VER_SIZE)) + +#define memRowLen(r) (isDataRow(r) ? memRowDataLen(r) : memRowKvLen(r)) +#define memRowTLen(r) (isDataRow(r) ? 
memRowDataTLen(r) : memRowKvTLen(r)) // using uint32_t/int32_t to store the TLen + +#define memRowDataVersion(r) dataRowVersion(memRowDataBody(r)) +#define memRowKvVersion(r) (*(int16_t *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) +#define memRowVersion(r) (isDataRow(r) ? memRowDataVersion(r) : memRowKvVersion(r)) // schema version +#define memRowSetKvVersion(r, v) (memRowKvVersion(r) = (v)) +#define memRowTuple(r) (isDataRow(r) ? dataRowTuple(memRowDataBody(r)) : kvRowValues(memRowKvBody(r))) + +#define memRowTKey(r) (isDataRow(r) ? dataRowTKey(memRowDataBody(r)) : kvRowTKey(memRowKvBody(r))) +#define memRowKey(r) (isDataRow(r) ? dataRowKey(memRowDataBody(r)) : kvRowKey(memRowKvBody(r))) +#define memRowSetTKey(r, k) \ + do { \ + if (isDataRow(r)) { \ + dataRowTKey(memRowDataBody(r)) = (k); \ + } else { \ + kvRowTKey(memRowKvBody(r)) = (k); \ + } \ + } while (0) + +#define memRowSetType(r, t) (memRowType(r) = (t)) +#define memRowSetLen(r, l) (isDataRow(r) ? memRowDataLen(r) = (l) : memRowKvLen(r) = (l)) +#define memRowSetVersion(r, v) (isDataRow(r) ? dataRowSetVersion(memRowDataBody(r), v) : memRowKvSetVersion(r, v)) +#define memRowCpy(dst, r) memcpy((dst), (r), memRowTLen(r)) +#define memRowMaxBytesFromSchema(s) (schemaTLen(s) + TD_MEM_ROW_DATA_HEAD_SIZE) +#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r)) + +SMemRow tdMemRowDup(SMemRow row); +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols); +// NOTE: offset here including the header size +static FORCE_INLINE void *tdGetKvRowDataOfCol(void *row, int32_t offset) { return POINTER_SHIFT(row, offset); } +// NOTE: offset here including the header size +static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int8_t type, int32_t offset) { + if (isDataRow(row)) { + return tdGetRowDataOfCol(row, type, offset); + } else if (isKvRow(row)) { + return tdGetKvRowDataOfCol(row, offset); + } else { + ASSERT(0); + } + return NULL; +} + +// ----------------- Raw payload structure for row: +/* |<------------ Head ------------->|<----------- body of column data tuple ------------------->| + * | |<----------------- flen ------------->|<--- value part --->| + * |SMemRowType| dataTLen | nCols | colId | colType | offset | ... | value |...|...|... | + * +-----------+----------+----------+--------------------------------------|--------------------| + * | uint8_t | uint32_t | uint16_t | int16_t | uint8_t | uint16_t | ... |.......|...|...|... | + * +-----------+----------+----------+--------------------------------------+--------------------| + * 1. offset in column data tuple starts from the value part in case of uint16_t overflow. + * 2. dataTLen: total length including the header and body. 
+ */ + +#define PAYLOAD_NCOLS_LEN sizeof(uint16_t) +#define PAYLOAD_NCOLS_OFFSET (sizeof(uint8_t) + sizeof(TDRowTLenT)) +#define PAYLOAD_HEADER_LEN (PAYLOAD_NCOLS_OFFSET + PAYLOAD_NCOLS_LEN) +#define PAYLOAD_ID_LEN sizeof(int16_t) +#define PAYLOAD_ID_TYPE_LEN (sizeof(int16_t) + sizeof(uint8_t)) +#define PAYLOAD_COL_HEAD_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(uint16_t)) +#define PAYLOAD_PRIMARY_COL_LEN (PAYLOAD_ID_TYPE_LEN + sizeof(TSKEY)) + +#define payloadBody(r) POINTER_SHIFT(r, PAYLOAD_HEADER_LEN) +#define payloadType(r) (*(uint8_t *)(r)) +#define payloadSetType(r, t) (payloadType(r) = (t)) +#define payloadTLen(r) (*(TDRowTLenT *)POINTER_SHIFT(r, TD_MEM_ROW_TYPE_SIZE)) // including total header +#define payloadSetTLen(r, l) (payloadTLen(r) = (l)) +#define payloadNCols(r) (*(TDRowLenT *)POINTER_SHIFT(r, PAYLOAD_NCOLS_OFFSET)) +#define payloadSetNCols(r, n) (payloadNCols(r) = (n)) +#define payloadValuesOffset(r) \ + (PAYLOAD_HEADER_LEN + payloadNCols(r) * PAYLOAD_COL_HEAD_LEN) // avoid using the macro in loop +#define payloadValues(r) POINTER_SHIFT(r, payloadValuesOffset(r)) // avoid using the macro in loop +#define payloadColId(c) (*(int16_t *)(c)) +#define payloadColType(c) (*(uint8_t *)POINTER_SHIFT(c, PAYLOAD_ID_LEN)) +#define payloadColOffset(c) (*(uint16_t *)POINTER_SHIFT(c, PAYLOAD_ID_TYPE_LEN)) +#define payloadColValue(c) POINTER_SHIFT(c, payloadColOffset(c)) + +#define payloadColSetId(c, i) (payloadColId(c) = (i)) +#define payloadColSetType(c, t) (payloadColType(c) = (t)) +#define payloadColSetOffset(c, o) (payloadColOffset(c) = (o)) + +#define payloadTSKey(r) (*(TSKEY *)POINTER_SHIFT(r, payloadValuesOffset(r))) +#define payloadTKey(r) (*(TKEY *)POINTER_SHIFT(r, payloadValuesOffset(r))) +#define payloadKey(r) tdGetKey(payloadTKey(r)) + + +static FORCE_INLINE char *payloadNextCol(char *pCol) { return (char *)POINTER_SHIFT(pCol, PAYLOAD_COL_HEAD_LEN); } + #ifdef __cplusplus } #endif -#endif // _TD_DATA_FORMAT_H_ +#endif // _TD_DATA_FORMAT_H_ \ No newline at end of file diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 47460a5fab34289e5534fc753ff4b4bd86452d65..7290db6ec978b85ad71ea744b7ea37c6488cb9fa 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -155,7 +155,7 @@ extern char tsMnodeTmpDir[]; extern char tsDataDir[]; extern char tsLogDir[]; extern char tsScriptDir[]; -extern int64_t tsMsPerDay[3]; +extern int64_t tsTickPerDay[3]; // system info extern char tsOsName[]; @@ -205,6 +205,16 @@ extern int32_t wDebugFlag; extern int32_t cqDebugFlag; extern int32_t debugFlag; +#ifdef TD_TSZ +// lossy +extern char lossyColumns[]; +extern double fPrecision; +extern double dPrecision; +extern uint32_t maxRange; +extern uint32_t curRange; +extern char Compressor[]; +#endif + typedef struct { char dir[TSDB_FILENAME_LEN]; int level; diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 8c970595523bbc2a8c9c0fe9edd53dd1f3499bd5..b29a535ec2c80f7fb058e3d1c55e5d16ed71c3c4 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -46,7 +46,7 @@ typedef struct SSqlExpr { char aliasName[TSDB_COL_NAME_LEN]; // as aliasName char token[TSDB_COL_NAME_LEN]; // original token SColIndex colInfo; - uint64_t uid; // refactor use the pointer + uint64_t uid; // table uid, todo refactor use the pointer int16_t functionId; // function id in aAgg array diff --git a/src/common/src/tarithoperator.c b/src/common/src/tarithoperator.c index b37e358b9c8688b888fb6122289dbe766f6306c9..3779303e1a41275996c52f828d433d2d68805fdf 100644 --- 
a/src/common/src/tarithoperator.c +++ b/src/common/src/tarithoperator.c @@ -18,7 +18,58 @@ #include "ttype.h" #include "tutil.h" #include "tarithoperator.h" +#include "tcompare.h" +//GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); +#define ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ + { \ + int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : MAX(len1, len2) - 1; \ + int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1; \ + \ + if ((len1) == (len2)) { \ + for (; i < (len2) && i >= 0; i += step, (out) += 1) { \ + if (isNull((char *)&((left)[i]), _left_type) || isNull((char *)&((right)[i]), _right_type)) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + *(out) = (double)(left)[i] op(right)[i]; \ + } \ + } else if ((len1) == 1) { \ + for (; i >= 0 && i < (len2); i += step, (out) += 1) { \ + if (isNull((char *)(left), _left_type) || isNull((char *)&(right)[i], _right_type)) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + *(out) = (double)(left)[0] op(right)[i]; \ + } \ + } else if ((len2) == 1) { \ + for (; i >= 0 && i < (len1); i += step, (out) += 1) { \ + if (isNull((char *)&(left)[i], _left_type) || isNull((char *)(right), _right_type)) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ + *(out) = (double)(left)[i] op(right)[0]; \ + } \ + } \ + } #define ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, op, _res_type, _ord) \ { \ int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 
0 : MAX(len1, len2) - 1; \ @@ -62,6 +113,12 @@ SET_DOUBLE_NULL(out); \ continue; \ } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[i])) * (right)[i]; \ } \ } else if (len1 == 1) { \ @@ -70,6 +127,12 @@ SET_DOUBLE_NULL(out); \ continue; \ } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[i])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ *(out) = (double)(left)[0] - ((int64_t)(((double)(left)[0]) / (right)[i])) * (right)[i]; \ } \ } else if ((len2) == 1) { \ @@ -78,6 +141,12 @@ SET_DOUBLE_NULL(out); \ continue; \ } \ + double v, z = 0.0; \ + GET_TYPED_DATA(v, double, _right_type, (char *)&((right)[0])); \ + if (getComparFunc(TSDB_DATA_TYPE_DOUBLE, 0)(&v, &z) == 0) { \ + SET_DOUBLE_NULL(out); \ + continue; \ + } \ *(out) = (double)(left)[i] - ((int64_t)(((double)(left)[i]) / (right)[0])) * (right)[0]; \ } \ } \ @@ -90,7 +159,7 @@ #define ARRAY_LIST_MULTI(left, right, _left_type, _right_type, len1, len2, out, _ord) \ ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, *, TSDB_DATA_TYPE_DOUBLE, _ord) #define ARRAY_LIST_DIV(left, right, _left_type, _right_type, len1, len2, out, _ord) \ - ARRAY_LIST_OP(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord) + ARRAY_LIST_OP_DIV(left, right, _left_type, _right_type, len1, len2, out, /, TSDB_DATA_TYPE_DOUBLE, _ord) #define ARRAY_LIST_REM(left, right, _left_type, _right_type, len1, len2, out, _ord) \ ARRAY_LIST_OP_REM(left, right, _left_type, _right_type, len1, len2, out, %, TSDB_DATA_TYPE_DOUBLE, _ord) diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c index 7ae34d532ca7ca52d3414a9d8f7366db15e046b8..9f7432c90d5588aff29ae87575f01deb60c62ebc 100644 --- a/src/common/src/tdataformat.c +++ b/src/common/src/tdataformat.c @@ -198,6 +198,14 @@ SDataRow tdDataRowDup(SDataRow row) { return trow; } +SMemRow tdMemRowDup(SMemRow row) { + SMemRow trow = malloc(memRowTLen(row)); + if (trow == NULL) return NULL; + + memRowCpy(trow, row); + return trow; +} + void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) { pDataCol->type = colType(pCol); pDataCol->colId = colColId(pCol); @@ -217,11 +225,22 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, void **pBuf, int maxPoints) *pBuf = POINTER_SHIFT(*pBuf, pDataCol->spaceSize); } } - // value from timestamp should be TKEY here instead of TSKEY -void dataColAppendVal(SDataCol *pCol, void *value, int numOfRows, int maxPoints) { +void dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) { ASSERT(pCol != NULL && value != NULL); + if (isAllRowsNull(pCol)) { + if (isNull(value, pCol->type)) { + // all null value yet, just return + return; + } + + if (numOfRows > 0) { + // Find the first not null value, fill all previouse values as NULL + dataColSetNEleNull(pCol, numOfRows, maxPoints); + } + } + if (IS_VAR_DATA_TYPE(pCol->type)) { // set offset pCol->dataOff[numOfRows] = pCol->len; @@ -243,7 +262,7 @@ bool isNEleNull(SDataCol *pCol, int nEle) { return true; } -void dataColSetNullAt(SDataCol *pCol, int index) { +FORCE_INLINE void dataColSetNullAt(SDataCol *pCol, int index) { if (IS_VAR_DATA_TYPE(pCol->type)) { pCol->dataOff[index] = pCol->len; char *ptr = 
POINTER_SHIFT(pCol->pData, pCol->len); @@ -399,8 +418,7 @@ void tdResetDataCols(SDataCols *pCols) { } } } - -void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { +static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) { ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row)); int rcol = 0; @@ -419,7 +437,8 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) while (dcol < pCols->numOfCols) { SDataCol *pDataCol = &(pCols->cols[dcol]); if (rcol >= schemaNCols(pSchema)) { - dataColSetNullAt(pDataCol, pCols->numOfRows); + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); dcol++; continue; } @@ -433,7 +452,8 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) } else if (pRowCol->colId < pDataCol->colId) { rcol++; } else { - dataColSetNullAt(pDataCol, pCols->numOfRows); + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); dcol++; } } @@ -441,6 +461,62 @@ void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols) pCols->numOfRows++; } +static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols) { + ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row)); + + int rcol = 0; + int dcol = 0; + + if (kvRowDeleted(row)) { + for (; dcol < pCols->numOfCols; dcol++) { + SDataCol *pDataCol = &(pCols->cols[dcol]); + if (dcol == 0) { + dataColAppendVal(pDataCol, kvRowValues(row), pCols->numOfRows, pCols->maxPoints); + } else { + dataColSetNullAt(pDataCol, pCols->numOfRows); + } + } + } else { + int nRowCols = kvRowNCols(row); + + while (dcol < pCols->numOfCols) { + SDataCol *pDataCol = &(pCols->cols[dcol]); + if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) { + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + ++dcol; + continue; + } + + SColIdx *colIdx = kvRowColIdxAt(row, rcol); + + if (colIdx->colId == pDataCol->colId) { + void *value = tdGetKvRowDataOfCol(row, colIdx->offset); + dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints); + ++dcol; + ++rcol; + } else if (colIdx->colId < pDataCol->colId) { + ++rcol; + } else { + // dataColSetNullAt(pDataCol, pCols->numOfRows); + dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints); + ++dcol; + } + } + } + pCols->numOfRows++; +} + +void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols) { + if (isDataRow(row)) { + tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols); + } else if (isKvRow(row)) { + tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols); + } else { + ASSERT(0); + } +} + int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *pOffset) { ASSERT(rowsToMerge > 0 && rowsToMerge <= source->numOfRows); ASSERT(target->numOfCols == source->numOfCols); @@ -452,7 +528,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int * SDataCols *pTarget = NULL; - if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyFirst(source))) { // No overlap + if ((target->numOfRows == 0) || (dataColsKeyLast(target) < dataColsKeyAtRow(source, *pOffset))) { // No overlap ASSERT(target->numOfRows + rowsToMerge <= target->maxPoints); for 
(int i = 0; i < rowsToMerge; i++) { for (int j = 0; j < source->numOfCols; j++) { @@ -563,7 +639,7 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { nrow = malloc(kvRowLen(row) + sizeof(SColIdx) + diff); if (nrow == NULL) return -1; - kvRowSetLen(nrow, kvRowLen(row) + (int16_t)sizeof(SColIdx) + diff); + kvRowSetLen(nrow, kvRowLen(row) + (uint16_t)sizeof(SColIdx) + diff); kvRowSetNCols(nrow, kvRowNCols(row) + 1); if (ptr == NULL) { @@ -605,8 +681,8 @@ int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) { if (varDataTLen(value) == varDataTLen(pOldVal)) { // just update the column value in place memcpy(pOldVal, value, varDataTLen(value)); } else { // need to reallocate the memory - int16_t diff = varDataTLen(value) - varDataTLen(pOldVal); - int16_t nlen = kvRowLen(row) + diff; + uint16_t diff = varDataTLen(value) - varDataTLen(pOldVal); + uint16_t nlen = kvRowLen(row) + diff; ASSERT(nlen > 0); nrow = malloc(nlen); if (nrow == NULL) return -1; @@ -708,4 +784,4 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) { memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size); return row; -} +} \ No newline at end of file diff --git a/src/common/src/texpr.c b/src/common/src/texpr.c index 0e9ec053cc5fb0f402fe9ffd42f3435fb4ca3cad..973b88fef9357b5e4b17ad0f9266d18cb98c2b1a 100644 --- a/src/common/src/texpr.c +++ b/src/common/src/texpr.c @@ -476,7 +476,14 @@ void buildFilterSetFromBinary(void **q, const char *buf, int32_t len) { int dummy = -1; int32_t sz = tbufReadInt32(&br); for (int32_t i = 0; i < sz; i++) { - if (type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_SMALLINT || type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_INT) { + if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type)) { + int64_t val = tbufReadInt64(&br); + taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy)); + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + uint64_t val = tbufReadUint64(&br); + taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy)); + } + else if (type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t val = tbufReadInt64(&br); taosHashPut(pObj, (char *)&val, sizeof(val), &dummy, sizeof(dummy)); } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index af21ef6d82f96de6c2d0349911b3176f14510ee2..34ccfbc3fe5c5279fd668663546c8064181daa13 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -84,8 +84,8 @@ int32_t tsMaxNumOfOrderedResults = 100000; // 10 ms for sliding time, the value will changed in case of time precision changed int32_t tsMinSlidingTime = 10; -// 10 ms for interval time range, changed accordingly -int32_t tsMinIntervalTime = 10; +// 1 us for interval time range, changed accordingly +int32_t tsMinIntervalTime = 1; // 20sec, the maximum value of stream computing delay, changed accordingly int32_t tsMaxStreamComputDelay = 20000; @@ -180,15 +180,15 @@ int8_t tsEnableStream = 1; int8_t tsCompactMnodeWal = 0; int8_t tsPrintAuth = 0; int8_t tscEmbedded = 0; -char configDir[TSDB_FILENAME_LEN] = {0}; -char tsVnodeDir[TSDB_FILENAME_LEN] = {0}; -char tsDnodeDir[TSDB_FILENAME_LEN] = {0}; -char tsMnodeDir[TSDB_FILENAME_LEN] = {0}; -char tsMnodeTmpDir[TSDB_FILENAME_LEN] = {0}; -char tsMnodeBakDir[TSDB_FILENAME_LEN] = {0}; -char tsDataDir[TSDB_FILENAME_LEN] = {0}; -char tsScriptDir[TSDB_FILENAME_LEN] = {0}; -char tsTempDir[TSDB_FILENAME_LEN] = "/tmp/"; +char configDir[PATH_MAX] = 
{0}; +char tsVnodeDir[PATH_MAX] = {0}; +char tsDnodeDir[PATH_MAX] = {0}; +char tsMnodeDir[PATH_MAX] = {0}; +char tsMnodeTmpDir[PATH_MAX] = {0}; +char tsMnodeBakDir[PATH_MAX] = {0}; +char tsDataDir[PATH_MAX] = {0}; +char tsScriptDir[PATH_MAX] = {0}; +char tsTempDir[PATH_MAX] = "/tmp/"; int32_t tsDiskCfgNum = 0; @@ -204,7 +204,7 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS]; * TSDB_TIME_PRECISION_MICRO: 86400000000L * TSDB_TIME_PRECISION_NANO: 86400000000000L */ -int64_t tsMsPerDay[] = {86400000L, 86400000000L, 86400000000000L}; +int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L}; // system info char tsOsName[10] = "Linux"; @@ -244,6 +244,19 @@ int32_t tsdbDebugFlag = 131; int32_t cqDebugFlag = 131; int32_t fsDebugFlag = 135; +#ifdef TD_TSZ +// +// lossy compress 6 +// +char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress. +// below option can take effect when tsLossyColumns not empty +double fPrecision = 1E-8; // float column precision +double dPrecision = 1E-16; // double column precision +uint32_t maxRange = 500; // max range +uint32_t curRange = 100; // range +char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR +#endif + int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; void (*monExecuteSQLFp)(char *sql) = NULL; @@ -1517,6 +1530,62 @@ static void doInitGlobalConfig(void) { cfg.ptrLength = tListLen(tsTempDir); cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + +#ifdef TD_TSZ + // lossy compress + cfg.option = "lossyColumns"; + cfg.ptr = lossyColumns; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = tListLen(lossyColumns); + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + + cfg.option = "fPrecision"; + cfg.ptr = &fPrecision; + cfg.valType = TAOS_CFG_VTYPE_DOUBLE; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = MIN_FLOAT; + cfg.maxValue = MAX_FLOAT; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + + + taosInitConfigOption(cfg); + + cfg.option = "dPrecision"; + cfg.ptr = &dPrecision; + cfg.valType = TAOS_CFG_VTYPE_DOUBLE; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = MIN_FLOAT; + cfg.maxValue = MAX_FLOAT; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + + cfg.option = "maxRange"; + cfg.ptr = &maxRange; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 0; + cfg.maxValue = 65536; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + + cfg.option = "range"; + cfg.ptr = &curRange; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 0; + cfg.maxValue = 65536; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); +#endif + } void taosInitGlobalCfg() { diff --git a/src/common/src/tname.c b/src/common/src/tname.c index c1c6ffa4b343dba47728055b50639ad12cfec9fe..26502c5d9cd032afd20d89ba8ea2da72b82a62c1 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -256,7 +256,7 @@ int32_t tNameExtractFullName(const SName* name, char* dst) { return -1; } - int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + 1 + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname); + int32_t len = snprintf(dst, TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN, "%s.%s", name->acctId, name->dbname); size_t tnameLen = strlen(name->tname); 
if (tnameLen > 0) { @@ -306,7 +306,7 @@ bool tIsValidName(const SName* name) { SName* tNameDup(const SName* name) { assert(name != NULL); - SName* p = calloc(1, sizeof(SName)); + SName* p = malloc(sizeof(SName)); memcpy(p, name, sizeof(SName)); return p; } diff --git a/src/common/src/ttypes.c b/src/common/src/ttypes.c index 6fa27a029bfd5356cca3e34dffe8d3018ade9fd8..13bcfe64800dfee693d2622a42df7ad9c7617fae 100644 --- a/src/common/src/ttypes.c +++ b/src/common/src/ttypes.c @@ -38,7 +38,11 @@ const int32_t TYPE_BYTES[15] = { #define DO_STATICS(__sum, __min, __max, __minIndex, __maxIndex, _list, _index) \ do { \ - (__sum) += (_list)[(_index)]; \ + if (_list[(_index)] >= (INT64_MAX - (__sum))) { \ + __sum = INT64_MAX; \ + } else { \ + (__sum) += (_list)[(_index)]; \ + } \ if ((__min) > (_list)[(_index)]) { \ (__min) = (_list)[(_index)]; \ (__minIndex) = (_index); \ @@ -492,30 +496,32 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) { } } -static uint8_t nullBool = TSDB_DATA_BOOL_NULL; -static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL; -static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL; -static uint32_t nullInt = TSDB_DATA_INT_NULL; -static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL; -static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL; -static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL; -static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL; -static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL; -static uint32_t nullIntu = TSDB_DATA_UINT_NULL; -static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL; - -static union { - tstr str; - char pad[sizeof(tstr) + 4]; -} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}}; - -static void *nullValues[] = { +static uint8_t nullBool = TSDB_DATA_BOOL_NULL; +static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL; +static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL; +static uint32_t nullInt = TSDB_DATA_INT_NULL; +static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL; +static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL; +static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL; +static uint8_t nullTinyIntu = TSDB_DATA_UTINYINT_NULL; +static uint16_t nullSmallIntu = TSDB_DATA_USMALLINT_NULL; +static uint32_t nullIntu = TSDB_DATA_UINT_NULL; +static uint64_t nullBigIntu = TSDB_DATA_UBIGINT_NULL; +static SBinaryNullT nullBinary = {1, TSDB_DATA_BINARY_NULL}; +static SNCharNullT nullNchar = {4, TSDB_DATA_NCHAR_NULL}; + +// static union { +// tstr str; +// char pad[sizeof(tstr) + 4]; +// } nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}}; + +static const void *nullValues[] = { &nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt, &nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar, &nullTinyIntu, &nullSmallIntu, &nullIntu, &nullBigIntu, }; -void *getNullValue(int32_t type) { +const void *getNullValue(int32_t type) { assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_UBIGINT); return nullValues[type - 1]; } @@ -632,7 +638,15 @@ int32_t tStrToInteger(const char* z, int16_t type, int32_t n, int64_t* value, bo } // the string may be overflow according to errno - *value = issigned? 
strtoll(z, &endPtr, radix):strtoull(z, &endPtr, radix); + if (!issigned) { + const char *p = z; + while(*p != 0 && *p == ' ') p++; + if (*p != 0 && *p == '-') { return -1;} + + *value = strtoull(z, &endPtr, radix); + } else { + *value = strtoll(z, &endPtr, radix); + } // not a valid integer number, return error if (endPtr - z != n || errno == ERANGE) { diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index 33dab51633b244326b0490c6bf7b784bc41fe77f..9e0f7ffc7411ce5e2a5a81d0e58a67e231580d00 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -77,6 +77,10 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) { pVar->nLen = strRmquote(pVar->pz, token->n); break; } + case TSDB_DATA_TYPE_TIMESTAMP: { + pVar->i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO); + break; + } default: { // nType == 0 means the null value type = TSDB_DATA_TYPE_NULL; @@ -403,6 +407,7 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { wchar_t *pWStr = calloc(1, (nLen + 1) * TSDB_NCHAR_SIZE); bool ret = taosMbsToUcs4(pDst, nLen, (char *)pWStr, (nLen + 1) * TSDB_NCHAR_SIZE, NULL); if (!ret) { + tfree(pWStr); return -1; } @@ -602,7 +607,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu } errno = 0; // reset global error code - int64_t result; + int64_t result = 0; switch (type) { case TSDB_DATA_TYPE_BOOL: { @@ -870,7 +875,8 @@ int32_t tVariantTypeSetType(tVariant *pVariant, char type) { free(pVariant->pz); pVariant->dKey = v; } else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) { - pVariant->dKey = (double)(pVariant->i64); + double tmp = (double) pVariant->i64; + pVariant->dKey = tmp; } pVariant->nType = TSDB_DATA_TYPE_DOUBLE; diff --git a/src/connector/go b/src/connector/go index 7a26c432f8b4203e42344ff3290b9b9b01b983d5..b8f76da4a708d158ec3cc4b844571dc4414e36b4 160000 --- a/src/connector/go +++ b/src/connector/go @@ -1 +1 @@ -Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5 +Subproject commit b8f76da4a708d158ec3cc4b844571dc4414e36b4 diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index 3530c6df097134a410bacec6b3cd013ef38a61aa..4a4d79099b076b8ff12d5b4fdbcba54049a6866d 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit 3530c6df097134a410bacec6b3cd013ef38a61aa +Subproject commit 4a4d79099b076b8ff12d5b4fdbcba54049a6866d diff --git a/src/connector/hivemq-tdengine-extension b/src/connector/hivemq-tdengine-extension index b62a26ecc164a310104df57691691b237e091c89..ce5201014136503d34fecbd56494b67b4961056c 160000 --- a/src/connector/hivemq-tdengine-extension +++ b/src/connector/hivemq-tdengine-extension @@ -1 +1 @@ -Subproject commit b62a26ecc164a310104df57691691b237e091c89 +Subproject commit ce5201014136503d34fecbd56494b67b4961056c diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index f6829bd0ead8c463a3c9d56156a6d7ec51057f1f..e432dac1cea593b371a173f334e5313236091ab3 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.31.jar ${LIBRARY_OUTPUT_PATH} + COMMAND 
${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.34-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 8f77582d30d1d8f2b0bf637573da6bbc229b40ae..ef57198e78d2268faba526d5506b0dc384f5766f 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.31 + 2.0.34 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 8ec65a243e21ff6d6da535690144ba06567ce3c9..3d5cf8efe359049d3a9bd2197af7afe1c9fe5321 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.31 + 2.0.34 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -37,13 +37,6 @@ - - junit - junit - 4.13 - test - - org.apache.httpcomponents httpclient @@ -52,12 +45,18 @@ com.alibaba fastjson - 1.2.58 + 1.2.76 com.google.guava guava - 29.0-jre + 30.1.1-jre + + + junit + junit + 4.13.1 + test @@ -113,17 +112,16 @@ **/*Test.java - **/TSDBJNIConnectorTest.java - **/UnsignedNumberJniTest.java - **/DatetimeBefore1970Test.java **/AppMemoryLeakTest.java **/AuthenticationTest.java - **/TaosInfoMonitorTest.java + **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java + **/DatetimeBefore1970Test.java **/FailOverTest.java **/InvalidResultSetPointerTest.java **/RestfulConnectionTest.java - **/TD4144Test.java - **/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java + **/TSDBJNIConnectorTest.java + **/TaosInfoMonitorTest.java + **/UnsignedNumberJniTest.java true diff --git a/src/connector/jdbc/readme.md b/src/connector/jdbc/readme.md index e81f078c153046265cbe9a856f7b48e26fc071fc..3c52ebb00ad3d6d9d32b9fd1400b0b12facd0576 100644 --- a/src/connector/jdbc/readme.md +++ b/src/connector/jdbc/readme.md @@ -1,54 +1,62 @@ +# Java Connector -## TAOS-JDBCDriver 概述 +TDengine 提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现,可在 maven 的中央仓库 [Sonatype Repository][1] 搜索下载。 -TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 +`taos-jdbcdriver` 的实现包括 2 种形式: JDBC-JNI 和 JDBC-RESTful(taos-jdbcdriver-2.0.18 开始支持 JDBC-RESTful)。 JDBC-JNI 通过调用客户端 libtaos.so(或 taos.dll )的本地方法实现, JDBC-RESTful 则在内部封装了 RESTful 接口实现。 -由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +![tdengine-connector](https://www.taosdata.com/cn/documentation/user/pages/images/tdengine-jdbc-connector.png) -* libtaos.so - 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 - -* taos.dll - 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 - -> 注意:在 windows 环境开发时需要安装 TDengine 对应的 windows 版本客户端,由于目前没有提供 Linux 环境单独的客户端,需要安装 TDengine 才能使用。 +上图显示了 3 种 Java 应用使用连接器访问 TDengine 的方式: -TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征的差异导致 taos-jdbcdriver 并未完全实现 JDBC 标准规范。在使用时需要注意以下几点: +* JDBC-JNI:Java 应用在物理节点1(pnode1)上使用 JDBC-JNI 的 API ,直接调用客户端 API(libtaos.so 或 taos.dll)将写入和查询请求发送到位于物理节点2(pnode2)上的 taosd 实例。 +* RESTful:应用将 SQL 发送给位于物理节点2(pnode2)上的 RESTful 连接器,再调用客户端 API(libtaos.so)。 +* JDBC-RESTful:Java 应用通过 JDBC-RESTful 的 API ,将 SQL 封装成一个 RESTful 请求,发送给物理节点2的 RESTful 连接器。 -* TDengine 
不提供针对单条数据记录的删除和修改的操作,驱动中也没有支持相关方法。 -* 由于不支持删除和修改,所以也不支持事务操作。 -* 目前不支持表间的 union 操作。 -* 目前不支持嵌套查询(nested query),`对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet还没关闭的情况下执行了新的查询,TSDBJDBCDriver 则会自动关闭上一个 ResultSet`。 +TDengine 的 JDBC 驱动实现尽可能与关系型数据库驱动保持一致,但时序空间数据库与关系对象型数据库服务的对象和技术特征存在差异,导致 `taos-jdbcdriver` 与传统的 JDBC driver 也存在一定差异。在使用时需要注意以下几点: +* TDengine 目前不支持针对单条数据记录的删除操作。 +* 目前不支持事务操作。 +* 目前不支持嵌套查询(nested query)。 +* 对每个 Connection 的实例,至多只能有一个打开的 ResultSet 实例;如果在 ResultSet 还没关闭的情况下执行了新的查询,taos-jdbcdriver 会自动关闭上一个 ResultSet。 -## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 -| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | -| --- | --- | --- | -| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | -| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | +## JDBC-JNI和JDBC-RESTful的对比 -## TDengine DataType 和 Java DataType + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 对比项 | JDBC-JNI | JDBC-RESTful |
+| --- | --- | --- |
+| 支持的操作系统 | linux、windows | 全平台 |
+| 是否需要安装 client | 需要 | 不需要 |
+| server 升级后是否需要升级 client | 需要 | 不需要 |
+| 写入性能 | JDBC-RESTful 是 JDBC-JNI 的 50%~90% | |
+| 查询性能 | JDBC-RESTful 与 JDBC-JNI 没有差别 | |
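
结合上表,下面给出一个最小示意(编者补充示例,非本仓库自带),演示同一段 Java 代码仅通过更换 driver 类名和 URL 前缀即可在 JDBC-JNI 与 JDBC-RESTful 之间切换;其中主机名 taosdemo.com、端口 6030/6041、用户名和密码沿用下文示例中的取值,表名 test.weather 为假设取值,仅作演示:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DriverSwitchDemo {
    public static void main(String[] args) throws Exception {
        // true:使用 JDBC-RESTful;false:使用 JDBC-JNI(需本地安装 libtaos.so / taos.dll)
        boolean useRestful = true;

        String url;
        if (useRestful) {
            Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
            url = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata";
        } else {
            Class.forName("com.taosdata.jdbc.TSDBDriver");
            url = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata";
        }

        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // 统一使用 "库名.表名" 的全限定写法,两种 driver 均适用
            ResultSet rs = stmt.executeQuery("select count(*) from test.weather");
            while (rs.next()) {
                System.out.println("count: " + rs.getLong(1));
            }
        }
    }
}
```
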
-TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: +注意:与 JNI 方式不同,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,RESTful 下所有对表名、超级表名的引用都需要指定数据库名前缀。 -| TDengine DataType | Java DataType | -| --- | --- | -| TIMESTAMP | java.sql.Timestamp | -| INT | java.lang.Integer | -| BIGINT | java.lang.Long | -| FLOAT | java.lang.Float | -| DOUBLE | java.lang.Double | -| SMALLINT, TINYINT |java.lang.Short | -| BOOL | java.lang.Boolean | -| BINARY, NCHAR | java.lang.String | - -## 如何获取 TAOS-JDBCDriver +## 如何获取 taos-jdbcdriver ### maven 仓库 目前 taos-jdbcdriver 已经发布到 [Sonatype Repository][1] 仓库,且各大仓库都已同步。 + * [sonatype][8] * [mvnrepository][9] * [maven.aliyun][10] @@ -56,56 +64,86 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 maven 项目中使用如下 pom.xml 配置即可: ```xml - - - com.taosdata.jdbc - taos-jdbcdriver - 1.0.3 - - + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.18 + ``` ### 源码编译打包 -下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package` 即可生成相应 jar 包。 +下载 [TDengine][3] 源码之后,进入 taos-jdbcdriver 源码目录 `src/connector/jdbc` 执行 `mvn clean package -Dmaven.test.skip=true` 即可生成相应 jar 包。 + -## 使用说明 +## JDBC的使用说明 ### 获取连接 -如下所示配置即可获取 TDengine Connection: +#### 指定URL获取连接 + +通过指定URL获取连接,如下所示: + +```java +Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); +String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; +Connection conn = DriverManager.getConnection(jdbcUrl); +``` + +以上示例,使用 **JDBC-RESTful** 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6041,数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 + +使用 JDBC-RESTful 接口,不需要依赖本地函数库。与 JDBC-JNI 相比,仅需要: + +1. driverClass 指定为“com.taosdata.jdbc.rs.RestfulDriver”; +2. jdbcUrl 以“jdbc:TAOS-RS://”开头; +3. 使用 6041 作为连接端口。 + +如果希望获得更好的写入和查询性能,Java 应用可以使用 **JDBC-JNI** 的driver,如下所示: + ```java Class.forName("com.taosdata.jdbc.TSDBDriver"); -String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata"; +String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; Connection conn = DriverManager.getConnection(jdbcUrl); ``` -> 端口 6030 为默认连接端口,JDBC URL 中的 log 为系统本身的监控数据库。 -TDengine 的 JDBC URL 规范格式为: -`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` +以上示例,使用了 JDBC-JNI 的 driver,建立了到 hostname 为 taosdemo.com,端口为 6030(TDengine 的默认端口),数据库名为 test 的连接。这个 URL 中指定用户名(user)为 root,密码(password)为 taosdata。 -其中,`{}` 中的内容必须,`[]` 中为可选。配置参数说明如下: +**注意**:使用 JDBC-JNI 的 driver,taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 +* libtaos.so + 在 linux 系统中成功安装 TDengine 后,依赖的本地函数库 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。 + +* taos.dll + 在 windows 系统中安装完客户端之后,驱动包依赖的 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。 + +> 在 windows 环境开发时需要安装 TDengine 对应的 [windows 客户端][14],Linux 服务器安装完 TDengine 之后默认已安装 client,也可以单独安装 [Linux 客户端][15] 连接远程 TDengine Server。 + +JDBC-JNI 的使用请参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1955.html)。 + +TDengine 的 JDBC URL 规范格式为: +`jdbc:[TAOS|TAOS-RS]://[host_name]:[port]/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]` + +url中的配置参数如下: * user:登录 TDengine 用户名,默认值 root。 * password:用户登录密码,默认值 taosdata。 -* charset:客户端使用的字符集,默认值为系统字符集。 * cfgdir:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* charset:客户端使用的字符集,默认值为系统字符集。 * locale:客户端语言环境,默认值系统当前 locale。 * timezone:客户端使用的时区,默认值为系统当前时区。 -以上参数可以在 3 处配置,`优先级由高到低`分别如下: -1. JDBC URL 参数 - 如上所述,可以在 JDBC URL 的参数中指定。 -2. 
java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps) + + +#### 指定URL和Properties获取连接 + +除了通过指定的 URL 获取连接,还可以使用 Properties 指定建立连接时的参数,如下所示: ```java public Connection getConn() throws Exception{ Class.forName("com.taosdata.jdbc.TSDBDriver"); - String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata"; + // Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + String jdbcUrl = "jdbc:TAOS://taosdemo.com:6030/test?user=root&password=taosdata"; + // String jdbcUrl = "jdbc:TAOS-RS://taosdemo.com:6041/test?user=root&password=taosdata"; Properties connProps = new Properties(); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); - connProps.setProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR, "/etc/taos"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); @@ -114,22 +152,68 @@ public Connection getConn() throws Exception{ } ``` -3. 客户端配置文件 taos.cfg +以上示例,建立一个到 hostname 为 taosdemo.com,端口为 6030,数据库名为 test 的连接。注释为使用 JDBC-RESTful 时的方法。这个连接在 url 中指定了用户名(user)为 root,密码(password)为 taosdata,并在 connProps 中指定了使用的字符集、语言环境、时区等信息。 + +properties 中的配置参数如下: +* TSDBDriver.PROPERTY_KEY_USER:登录 TDengine 用户名,默认值 root。 +* TSDBDriver.PROPERTY_KEY_PASSWORD:用户登录密码,默认值 taosdata。 +* TSDBDriver.PROPERTY_KEY_CONFIG_DIR:客户端配置文件目录路径,Linux OS 上默认值 /etc/taos ,Windows OS 上默认值 C:/TDengine/cfg。 +* TSDBDriver.PROPERTY_KEY_CHARSET:客户端使用的字符集,默认值为系统字符集。 +* TSDBDriver.PROPERTY_KEY_LOCALE:客户端语言环境,默认值系统当前 locale。 +* TSDBDriver.PROPERTY_KEY_TIME_ZONE:客户端使用的时区,默认值为系统当前时区。 + + + +#### 使用客户端配置文件建立连接 + +当使用 JDBC-JNI 连接 TDengine 集群时,可以使用客户端配置文件,在客户端配置文件中指定集群的 firstEp、secondEp参数。 +如下所示: - linux 系统默认配置文件为 /var/lib/taos/taos.cfg,windows 系统默认配置文件路径为 C:\TDengine\cfg\taos.cfg。 -```properties -# client default username -# defaultUser root +1. 在 Java 应用中不指定 hostname 和 port -# client default password -# defaultPass taosdata +```java +public Connection getConn() throws Exception{ + Class.forName("com.taosdata.jdbc.TSDBDriver"); + String jdbcUrl = "jdbc:TAOS://:/test?user=root&password=taosdata"; + Properties connProps = new Properties(); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + connProps.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + Connection conn = DriverManager.getConnection(jdbcUrl, connProps); + return conn; +} +``` + +2. 在配置文件中指定 firstEp 和 secondEp + +``` +# first fully qualified domain name (FQDN) for TDengine system +firstEp cluster_node1:6030 + +# second fully qualified domain name (FQDN) for TDengine system, for cluster only +secondEp cluster_node2:6030 # default system charset -# charset UTF-8 +# charset UTF-8 # system locale # locale en_US.UTF-8 ``` + +以上示例,jdbc 会使用客户端的配置文件,建立到 hostname 为 cluster_node1、端口为 6030、数据库名为 test 的连接。当集群中 firstEp 节点失效时,JDBC 会尝试使用 secondEp 连接集群。 +TDengine 中,只要保证 firstEp 和 secondEp 中一个节点有效,就可以正常建立到集群的连接。 + +> 注意:这里的配置文件指的是调用 JDBC Connector 的应用程序所在机器上的配置文件,Linux OS 上默认值 /etc/taos/taos.cfg ,Windows OS 上默认值 C://TDengine/cfg/taos.cfg。 + +#### 配置参数的优先级 + +通过以上 3 种方式获取连接,如果配置参数在 url、Properties、客户端配置文件中有重复,则参数的`优先级由高到低`分别如下: +1. JDBC URL 参数,如上所述,可以在 JDBC URL 的参数中指定。 +2. Properties connProps +3. 
客户端配置文件 taos.cfg + +例如:在 url 中指定了 password 为 taosdata,在 Properties 中指定了 password 为 taosdemo,那么,JDBC 会使用 url 中的 password 建立连接。 + > 更多详细配置请参考[客户端配置][13] ### 创建数据库和表 @@ -146,6 +230,7 @@ stmt.executeUpdate("use db"); // create table stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)"); ``` + > 注意:如果不使用 `use db` 指定数据库,则后续对表的操作都需要增加数据库名称作为前缀,如 db.tb。 ### 插入数据 @@ -156,6 +241,7 @@ int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now System.out.println("insert " + affectedRows + " rows."); ``` + > now 为系统内部函数,默认为服务器当前时间。 > `now + 1s` 代表服务器当前时间往后加 1 秒,数字后面代表时间单位:a(毫秒), s(秒), m(分), h(小时), d(天),w(周), n(月), y(年)。 @@ -177,8 +263,150 @@ while(resultSet.next()){ System.out.printf("%s, %d, %s\n", ts, temperature, humidity); } ``` + > 查询和操作关系型数据库一致,使用下标获取返回字段内容时从 1 开始,建议使用字段名称获取。 +### 处理异常 + +在报错后,通过SQLException可以获取到错误的信息和错误码: + +```java +try (Statement statement = connection.createStatement()) { + // executeQuery + ResultSet resultSet = statement.executeQuery(sql); + // print result + printResult(resultSet); +} catch (SQLException e) { + System.out.println("ERROR Message: " + e.getMessage()); + System.out.println("ERROR Code: " + e.getErrorCode()); + e.printStackTrace(); +} +``` + +JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(错误码在0x2301到0x2350之间),JNI方法的报错(错误码在0x2351到0x2400之间),TDengine其他功能模块的报错。 +具体的错误码请参考: +* https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +* https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h + +### 通过参数绑定写入数据 + +从 2.1.2.0 版本开始,TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。) + +```java +Statement stmt = conn.createStatement(); +Random r = new Random(); + +// INSERT 语句中,VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值: +TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags (?, ?) 
(ts, c1, c2) values(?, ?, ?)"); + +// 设定数据表名: +s.setTableName("w1"); +// 设定 TAGS 取值: +s.setTagInt(0, r.nextInt(10)); +s.setTagString(1, "Beijing"); + +int numOfRows = 10; + +// VALUES 部分以逐列的方式进行设置: +ArrayList ts = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + ts.add(System.currentTimeMillis() + i); +} +s.setTimestamp(0, ts); + +ArrayList s1 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s1.add(r.nextInt(100)); +} +s.setInt(1, s1); + +ArrayList s2 = new ArrayList<>(); +for (int i = 0; i < numOfRows; i++){ + s2.add("test" + r.nextInt(100)); +} +s.setString(2, s2, 10); + +// AddBatch 之后,缓存并未清空。为避免混乱,并不推荐在 ExecuteBatch 之前再次绑定新一批的数据: +s.columnDataAddBatch(); +// 执行绑定数据后的语句: +s.columnDataExecuteBatch(); +// 执行语句后清空缓存。在清空之后,可以复用当前的对象,绑定新的一批数据(可以是新表名、新 TAGS 值、新 VALUES 值): +s.columnDataClearBatch(); +// 执行完毕,释放资源: +s.columnDataCloseBatch(); +``` + +用于设定 TAGS 取值的方法总共有: +```java +public void setTagNull(int index, int type) +public void setTagBoolean(int index, boolean value) +public void setTagInt(int index, int value) +public void setTagByte(int index, byte value) +public void setTagShort(int index, short value) +public void setTagLong(int index, long value) +public void setTagTimestamp(int index, long value) +public void setTagFloat(int index, float value) +public void setTagDouble(int index, double value) +public void setTagString(int index, String value) +public void setTagNString(int index, String value) +``` + +用于设定 VALUES 数据列的取值的方法总共有: +```java +public void setInt(int columnIndex, ArrayList list) throws SQLException +public void setFloat(int columnIndex, ArrayList list) throws SQLException +public void setTimestamp(int columnIndex, ArrayList list) throws SQLException +public void setLong(int columnIndex, ArrayList list) throws SQLException +public void setDouble(int columnIndex, ArrayList list) throws SQLException +public void setBoolean(int columnIndex, ArrayList list) throws SQLException +public void setByte(int columnIndex, ArrayList list) throws SQLException +public void setShort(int columnIndex, ArrayList list) throws SQLException +public void setString(int columnIndex, ArrayList list, int size) throws SQLException +public void setNString(int columnIndex, ArrayList list, int size) throws SQLException +``` +其中 setString 和 setNString 都要求用户在 size 参数里声明表定义中对应列的列宽。 + +### 订阅 + +#### 创建 + +```java +TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false); +``` + +`subscribe` 方法的三个参数含义如下: + +* topic:订阅的主题(即名称),此参数是订阅的唯一标识 +* sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据 +* restart:如果订阅已经存在,是重新开始,还是继续之前的订阅 + +如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。 + +#### 消费数据 + +```java +int total = 0; +while(true) { + TSDBResultSet rs = sub.consume(); + int count = 0; + while(rs.next()) { + count++; + } + total += count; + System.out.printf("%d rows consumed, total %d\n", count, total); + Thread.sleep(1000); +} +``` + +`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。 + +#### 关闭订阅 + +```java +sub.close(true); +``` + +`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。 ### 关闭资源 @@ -187,12 +415,17 @@ resultSet.close(); stmt.close(); conn.close(); ``` + > `注意务必要将 connection 进行关闭`,否则会出现连接泄露。 + + + ## 与连接池使用 **HikariCP** * 引入相应 HikariCP maven 依赖: + ```xml com.zaxxer @@ -202,31 +435,34 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws SQLException { 
HikariConfig config = new HikariConfig(); + // jdbc properties config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); config.setUsername("root"); config.setPassword("taosdata"); - - config.setMinimumIdle(3); //minimum number of idle connection + // connection pool configurations + config.setMinimumIdle(10); //minimum number of idle connection config.setMaximumPoolSize(10); //maximum number of connection in the pool - config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool - config.setIdleTimeout(60000); // max idle time for recycle idle connection - config.setConnectionTestQuery("describe log.dn"); //validation query - config.setValidationTimeout(3000); //validation query timeout + config.setConnectionTimeout(30000); //maximum wait milliseconds for get connection from pool + config.setMaxLifetime(0); // maximum life time for each connection + config.setIdleTimeout(0); // max idle time for recycle idle connection + config.setConnectionTestQuery("select server_status()"); //validation query HikariDataSource ds = new HikariDataSource(config); //create datasource - + Connection connection = ds.getConnection(); // get connection Statement statement = connection.createStatement(); // get statement - - //query or insert + + //query or insert // ... - + connection.close(); // put back to conneciton pool } ``` + > 通过 HikariDataSource.getConnection() 获取连接后,使用完成后需要调用 close() 方法,实际上它并不会关闭连接,只是放回连接池中。 > 更多 HikariCP 使用问题请查看[官方说明][5] @@ -243,40 +479,32 @@ conn.close(); ``` * 使用示例如下: + ```java public static void main(String[] args) throws Exception { - Properties properties = new Properties(); - properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); - properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); - properties.put("username","root"); - properties.put("password","taosdata"); - - properties.put("maxActive","10"); //maximum number of connection in the pool - properties.put("initialSize","3");//initial number of connection - properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool - properties.put("minIdle","3");//minimum number of connection in the pool - - properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection - - properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle - properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle - - properties.put("validationQuery","describe log.dn"); //validation query - properties.put("testWhileIdle","true"); // test connection while idle - properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true - properties.put("testOnReturn","false"); // don't need while testWhileIdle is true - - //create druid datasource - DataSource ds = DruidDataSourceFactory.createDataSource(properties); - Connection connection = ds.getConnection(); // get connection - Statement statement = connection.createStatement(); // get statement + DruidDataSource dataSource = new DruidDataSource(); + // jdbc properties + dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver"); + dataSource.setUrl(url); + dataSource.setUsername("root"); + dataSource.setPassword("taosdata"); + // pool configurations + dataSource.setInitialSize(10); + dataSource.setMinIdle(10); + dataSource.setMaxActive(10); + dataSource.setMaxWait(30000); + dataSource.setValidationQuery("select server_status()"); + + Connection connection = dataSource.getConnection(); // get connection + Statement statement 
= connection.createStatement(); // get statement //query or insert // ... connection.close(); // put back to conneciton pool } ``` + > 更多 druid 使用问题请查看[官方说明][6] **注意事项** @@ -291,29 +519,64 @@ server_status()| Query OK, 1 row(s) in set (0.000141s) ``` + + ## 与框架使用 * Spring JdbcTemplate 中使用 taos-jdbcdriver,可参考 [SpringJdbcTemplate][11] * Springboot + Mybatis 中使用,可参考 [springbootdemo][12] + + +## TAOS-JDBCDriver 版本以及支持的 TDengine 版本和 JDK 版本 + +| taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | +| -------------------- | ----------------- | -------- | +| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | +| 2.0.22 - 2.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | +| 1.0.3 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.2 | 1.6.1.x 及以上 | 1.8.x | +| 1.0.1 | 1.6.1.x 及以上 | 1.8.x | + + + +## TDengine DataType 和 Java DataType + +TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: + +| TDengine DataType | Java DataType | +| ----------------- | ------------------ | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | + + + ## 常见问题 * java.lang.UnsatisfiedLinkError: no taos in java.library.path - + **原因**:程序没有找到依赖的本地函数库 taos。 - - **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 - + + **解决方法**:windows 下可以将 C:\TDengine\driver\taos.dll 拷贝到 C:\Windows\System32\ 目录下,linux 下将建立如下软链 `ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` 即可。 + * java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform - + **原因**:目前 TDengine 只支持 64 位 JDK。 - + **解决方法**:重新安装 64 位 JDK。 * 其它问题请参考 [Issues][7] - - [1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver [2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver [3]: https://github.com/taosdata/TDengine @@ -324,6 +587,9 @@ Query OK, 1 row(s) in set (0.000141s) [8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver [9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver [10]: https://maven.aliyun.com/mvn/search -[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate [12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo -[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE \ No newline at end of file +[13]: https://www.taosdata.com/cn/documentation/administrator/#client +[14]: https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client +[15]: https://www.taosdata.com/cn/getting-started/#%E5%AE%A2%E6%88%B7%E7%AB%AF + diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index 2970f6c2d37d73476ee0d8831a68128569766fb4..9950dbeb64c8cf4457b692a834d587ff8fd2e808 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -1,5 +1,7 @@ package com.taosdata.jdbc; +import com.taosdata.jdbc.enums.TimestampFormat; + import 
java.sql.*; import java.util.Enumeration; import java.util.Map; @@ -18,7 +20,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti for (String propName : propNames) { clientInfoProps.setProperty(propName, properties.getProperty(propName)); } - String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, "STRING"); + String timestampFormat = properties.getProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, String.valueOf(TimestampFormat.STRING)); clientInfoProps.setProperty(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT, timestampFormat); } @@ -169,11 +171,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti // do nothing } - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - + private void checkResultSetTypeAndResultSetConcurrency(int resultSetType, int resultSetConcurrency) throws SQLException { switch (resultSetType) { case ResultSet.TYPE_FORWARD_ONLY: break; @@ -192,7 +190,14 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti default: throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); } + } + @Override + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); + + checkResultSetTypeAndResultSetConcurrency(resultSetType, resultSetConcurrency); return createStatement(); } @@ -201,24 +206,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - switch (resultSetType) { - case ResultSet.TYPE_FORWARD_ONLY: - break; - case ResultSet.TYPE_SCROLL_INSENSITIVE: - case ResultSet.TYPE_SCROLL_SENSITIVE: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - default: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } - - switch (resultSetConcurrency) { - case ResultSet.CONCUR_READ_ONLY: - break; - case ResultSet.CONCUR_UPDATABLE: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - default: - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - } + checkResultSetTypeAndResultSetConcurrency(resultSetType, resultSetConcurrency); return prepareStatement(sql); } @@ -304,9 +292,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti @Override public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - switch (resultSetHoldability) { case ResultSet.HOLD_CURSORS_OVER_COMMIT: break; @@ -320,11 +305,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti } @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { switch (resultSetHoldability) { case 
ResultSet.HOLD_CURSORS_OVER_COMMIT: break; @@ -423,7 +404,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti status = resultSet.getInt("server_status()"); resultSet.close(); } - return status == 1 ? true : false; + return status == 1; }); boolean status = false; @@ -432,9 +413,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti status = future.get(); else status = future.get(timeout, TimeUnit.SECONDS); - } catch (InterruptedException e) { - e.printStackTrace(); - } catch (ExecutionException e) { + } catch (InterruptedException | ExecutionException e) { e.printStackTrace(); } catch (TimeoutException e) { future.cancel(true); @@ -450,8 +429,7 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed) throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); - if (clientInfoProps != null) - clientInfoProps.setProperty(name, value); + clientInfoProps.setProperty(name, value); } @Override @@ -459,8 +437,8 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed) throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); - for (Enumeration enumer = properties.keys(); enumer.hasMoreElements(); ) { - String name = (String) enumer.nextElement(); + for (Enumeration enumeration = properties.keys(); enumeration.hasMoreElements(); ) { + String name = (String) enumeration.nextElement(); clientInfoProps.put(name, properties.getProperty(name)); } } @@ -516,7 +494,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti public void abort(Executor executor) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - // do nothing } @@ -527,14 +504,13 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (milliseconds < 0) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + // do nothing } @Override public int getNetworkTimeout() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return 0; } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java index 48811d30a6903c077a9c0fb55176b0bc2ca9ba75..3c9c784f594d6cb022267c2ff1cd848c26f53ac3 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDatabaseMetaData.java @@ -12,8 +12,7 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da private final static int DRIVER_MAJAR_VERSION = 2; private final static int DRIVER_MINOR_VERSION = 0; - private String precision = "ms"; - private String database; + private String precision = TSDBConstants.DEFAULT_PRECISION; public boolean allProceduresAreCallable() throws SQLException { return false; @@ -1223,7 +1222,6 @@ public abstract class AbstractDatabaseMetaData extends WrapperImpl implements Da ResultSet databases = stmt.executeQuery("show databases"); while (databases.next()) { String dbname = databases.getString("name"); - 
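The result-set checks consolidated into checkResultSetTypeAndResultSetConcurrency, together with the reworked isValid, can be exercised as below: only forward-only, read-only statements are accepted, everything else is rejected with a SQLException. Connection details are placeholder assumptions:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ConnectionCapabilitiesExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata")) {
            // supported combination: forward-only, read-only
            Statement ok = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
            ok.close();
            // any scrollable or updatable request fails the shared validation
            try {
                conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
            } catch (SQLException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
            // isValid() runs "select server_status()" under a timeout given in seconds
            System.out.println("connection valid: " + conn.isValid(3));
        }
    }
}
```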
this.database = dbname; this.precision = databases.getString("precision"); if (dbname.equalsIgnoreCase(catalog)) return true; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java index 5eaac1cd3ba7283b019a1d294c1a33334a3d9fa7..7d4a2683918d917c795fc4d23af3369560e3ef52 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractDriver.java @@ -58,7 +58,7 @@ public abstract class AbstractDriver implements Driver { value = parameterValuePair.substring(indexOfEqual + 1); } } - if ((value != null && value.length() > 0) && (parameter != null && parameter.length() > 0)) { + if (value != null && value.length() > 0 && parameter.length() > 0) { urlProps.setProperty(parameter, value); } } @@ -87,7 +87,7 @@ public abstract class AbstractDriver implements Driver { url = url.substring(0, indexOfColon); } // parse host - if (url != null && url.length() > 0 && url.trim().length() > 0) { + if (url.length() > 0 && url.trim().length() > 0) { urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); } return urlProps; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java index cb6c7d40ad1f1e12324043d78cb423fcfd46dca8..7d9d8ee5154bf3e63c024d0ded1defd2d2bdb4b0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java @@ -1,7 +1,5 @@ package com.taosdata.jdbc; -import com.sun.org.apache.xpath.internal.operations.Bool; - import java.sql.ParameterMetaData; import java.sql.SQLException; import java.sql.Timestamp; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java index 9dc559339a4ef88b8423ffcd621c7498437dac5c..a801f5a674acdd23f1ca7f949cbb7092f4633bda 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java @@ -2,6 +2,7 @@ package com.taosdata.jdbc; import java.sql.*; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; public abstract class AbstractStatement extends WrapperImpl implements Statement { @@ -9,8 +10,6 @@ public abstract class AbstractStatement extends WrapperImpl implements Statement protected List batchedArgs; private int fetchSize; - - @Override public abstract ResultSet executeQuery(String sql) throws SQLException; @@ -198,13 +197,44 @@ public abstract class AbstractStatement extends WrapperImpl implements Statement if (batchedArgs == null || batchedArgs.isEmpty()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_BATCH_IS_EMPTY); + String clientInfo = getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_BATCH_ERROR_IGNORE); + boolean batchErrorIgnore = clientInfo == null ? 
TSDBConstants.DEFAULT_BATCH_ERROR_IGNORE : Boolean.parseBoolean(clientInfo); + + if (batchErrorIgnore) { + return executeBatchIgnoreException(); + } + return executeBatchThrowException(); + } + + private int[] executeBatchIgnoreException() { + return batchedArgs.stream().mapToInt(sql -> { + try { + boolean isSelect = execute(sql); + if (isSelect) { + return SUCCESS_NO_INFO; + } else { + return getUpdateCount(); + } + } catch (SQLException e) { + return EXECUTE_FAILED; + } + }).toArray(); + } + + private int[] executeBatchThrowException() throws BatchUpdateException { int[] res = new int[batchedArgs.size()]; for (int i = 0; i < batchedArgs.size(); i++) { - boolean isSelect = execute(batchedArgs.get(i)); - if (isSelect) { - res[i] = SUCCESS_NO_INFO; - } else { - res[i] = getUpdateCount(); + try { + boolean isSelect = execute(batchedArgs.get(i)); + if (isSelect) { + res[i] = SUCCESS_NO_INFO; + } else { + res[i] = getUpdateCount(); + } + } catch (SQLException e) { + String reason = e.getMessage(); + int[] updateCounts = Arrays.copyOfRange(res, 0, i); + throw new BatchUpdateException(reason, updateCounts, e); } } return res; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java index cd266529f34c0693a9891af33c12705b90132800..bda3d522123d09ece81384c6eba814c7e548e1ec 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/DatabaseMetaDataResultSet.java @@ -15,10 +15,12 @@ package com.taosdata.jdbc; import java.math.BigDecimal; -import java.sql.*; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; import java.util.ArrayList; import java.util.Calendar; -import java.util.Iterator; import java.util.List; /* @@ -130,7 +132,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet { public Timestamp getTimestamp(int columnIndex) throws SQLException { int colType = columnMetaDataList.get(columnIndex - 1).getColType(); int nativeType = TSDBConstants.jdbcType2TaosType(colType); - return rowCursor.getTimestamp(columnIndex,nativeType); + return rowCursor.getTimestamp(columnIndex, nativeType); } @Override @@ -145,9 +147,7 @@ public class DatabaseMetaDataResultSet extends AbstractResultSet { @Override public int findColumn(String columnLabel) throws SQLException { - Iterator colMetaDataIt = this.columnMetaDataList.iterator(); - while (colMetaDataIt.hasNext()) { - ColumnMetaData colMetaData = colMetaDataIt.next(); + for (ColumnMetaData colMetaData : this.columnMetaDataList) { if (colMetaData.getColName() != null && colMetaData.getColName().equals(columnLabel)) { return colMetaData.getColIndex(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java index b6587b942de3c46139fa0640c07098cbc2b025d4..fa8bf9e7e99ed38cf8ff718e00731c4fae0158a9 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/EmptyResultSet.java @@ -23,7 +23,7 @@ import java.util.Calendar; import java.util.Map; /* - * TDengine only supports a subset of the standard SQL, thus this implemetation of the + * TDengine only supports a subset of the standard SQL, thus this implementation of the * standard JDBC API contains more or less some adjustments customized for 
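The batchErrorIgnore client-info flag read just above selects one of two executeBatch behaviours: when it is true, each failing statement is reported as Statement.EXECUTE_FAILED and the batch keeps going; when it is false (the default), the first failure raises a BatchUpdateException carrying the update counts gathered so far. A sketch assuming the flag set in the connection Properties reaches the client info, as the new PROPERTY_KEY_BATCH_ERROR_IGNORE key suggests; the server address and table names are placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;

public class BatchErrorIgnoreExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("user", "root");
        props.setProperty("password", "taosdata");
        props.setProperty("batchErrorIgnore", "true"); // keep executing after a failed statement

        try (Connection conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:6030/test", props);
             Statement stmt = conn.createStatement()) {
            stmt.addBatch("insert into weather values (now, 23.5)");
            stmt.addBatch("insert into no_such_table values (now, 1)"); // this one fails
            stmt.addBatch("insert into weather values (now, 24.0)");
            for (int result : stmt.executeBatch()) {
                // the failing entry shows up as Statement.EXECUTE_FAILED (-3)
                System.out.println(result);
            }
        }
        // Without batchErrorIgnore the same batch stops at the failure:
        // catch (java.sql.BatchUpdateException e) { int[] done = e.getUpdateCounts(); }
    }
}
```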
certain * compatibility needs. */ @@ -326,7 +326,7 @@ public class EmptyResultSet implements ResultSet { @Override public int getFetchDirection() throws SQLException { - return 0; + return ResultSet.FETCH_FORWARD; } @Override @@ -341,12 +341,12 @@ public class EmptyResultSet implements ResultSet { @Override public int getType() throws SQLException { - return 0; + return ResultSet.TYPE_FORWARD_ONLY; } @Override public int getConcurrency() throws SQLException { - return 0; + return ResultSet.CONCUR_READ_ONLY; } @Override @@ -746,7 +746,7 @@ public class EmptyResultSet implements ResultSet { @Override public int getHoldability() throws SQLException { - return 0; + return ResultSet.CLOSE_CURSORS_AT_COMMIT; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index 02fee74eb5544f282559f88dab723ccfd8ca096f..8cd8da6de4f7d5324afbc6d5a5d54d6b8dcc7a8d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -20,7 +20,7 @@ import java.util.Properties; public class TSDBConnection extends AbstractConnection { private TSDBJNIConnector connector; - private TSDBDatabaseMetaData databaseMetaData; + private final TSDBDatabaseMetaData databaseMetaData; private boolean batchFetch; public Boolean getBatchFetch() { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java index e6406d2c6dc7990ec4f3149bb8c5146202f5d326..740e3c6c21be568bf71e4d68a3129c527da441a6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConstants.java @@ -54,10 +54,11 @@ public abstract class TSDBConstants { public static final int TSDB_DATA_TYPE_USMALLINT = 12; //unsigned smallint public static final int TSDB_DATA_TYPE_UINT = 13; //unsigned int public static final int TSDB_DATA_TYPE_UBIGINT = 14; //unsigned bigint + // nchar column max length public static final int maxFieldSize = 16 * 1024; - // precision for data types + // precision for data types, this is used for metadata public static final int BOOLEAN_PRECISION = 1; public static final int TINYINT_PRECISION = 4; public static final int SMALLINT_PRECISION = 6; @@ -67,10 +68,14 @@ public abstract class TSDBConstants { public static final int DOUBLE_PRECISION = 22; public static final int TIMESTAMP_MS_PRECISION = 23; public static final int TIMESTAMP_US_PRECISION = 26; - // scale for data types + // scale for data types, this is used for metadata public static final int FLOAT_SCALE = 31; public static final int DOUBLE_SCALE = 31; + public static final String DEFAULT_PRECISION = "ms"; + + public static final boolean DEFAULT_BATCH_ERROR_IGNORE = false; + public static int typeName2JdbcType(String type) { switch (type.toUpperCase()) { case "TIMESTAMP": @@ -125,7 +130,7 @@ public abstract class TSDBConstants { case TSDBConstants.TSDB_DATA_TYPE_NCHAR: return Types.NCHAR; } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE); } public static String taosType2JdbcTypeName(int taosType) throws SQLException { @@ -155,7 +160,7 @@ public abstract class TSDBConstants { case TSDBConstants.TSDB_DATA_TYPE_NCHAR: return "NCHAR"; default: - throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java index 8b7ede148e89cce0d8db22e62627bd1e1c49f9bb..9a5eda4cd8bf59e75540a4ce0a1d7fb4255fa1f1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDatabaseMetaData.java @@ -20,8 +20,8 @@ import java.sql.SQLException; public class TSDBDatabaseMetaData extends AbstractDatabaseMetaData { - private String url; - private String userName; + private final String url; + private final String userName; private Connection conn; public TSDBDatabaseMetaData(String url, String userName) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 55533bd28cc4027c2e4a258cf2a36fd5b72d12f2..f5f16758c1eb6df0a007405412b7f32082dfb026 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -100,6 +100,11 @@ public class TSDBDriver extends AbstractDriver { */ public static final String PROPERTY_KEY_TIMESTAMP_FORMAT = "timestampFormat"; + /** + * continue process commands in executeBatch + */ + public static final String PROPERTY_KEY_BATCH_ERROR_IGNORE = "batchErrorIgnore"; + private TSDBDatabaseMetaData dbMetaData = null; static { @@ -176,7 +181,7 @@ public class TSDBDriver extends AbstractDriver { int beginningOfSlashes = url.indexOf("//"); int index = url.indexOf("?"); if (index != -1) { - String paramString = url.substring(index + 1, url.length()); + String paramString = url.substring(index + 1); url = url.substring(0, index); StringTokenizer queryParams = new StringTokenizer(paramString, "&"); while (queryParams.hasMoreElements()) { @@ -213,7 +218,7 @@ public class TSDBDriver extends AbstractDriver { url = url.substring(0, indexOfColon); } - if (url != null && url.length() > 0 && url.trim().length() > 0) { + if (url.length() > 0 && url.trim().length() > 0) { urlProps.setProperty(TSDBDriver.PROPERTY_KEY_HOST, url); } @@ -233,7 +238,7 @@ public class TSDBDriver extends AbstractDriver { return false; } - public Logger getParentLogger() throws SQLFeatureNotSupportedException { + public Logger getParentLogger() { return null; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java index 90967b3620fdaccf69f253c8f8927c067b6221fd..d626698663c648ee8c39bab4d5f7831099ba8c81 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBError.java @@ -8,7 +8,7 @@ import java.util.HashMap; import java.util.Map; public class TSDBError { - private static Map TSDBErrorMap = new HashMap<>(); + private static final Map TSDBErrorMap = new HashMap<>(); static { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED, "connection already closed"); @@ -31,11 +31,11 @@ public class TSDBError { TSDBErrorMap.put(TSDBErrorNumbers.ERROR_URL_NOT_SET, "url is not set"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_INVALID_SQL, "invalid sql"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE, "numeric value out of range"); - 
TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE, "unknown taos type in tdengine"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TAOS_TYPE, "unknown taos type in tdengine"); + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION, "unknown timestamp precision"); - /**************************************************/ TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown error"); - /**************************************************/ + TSDBErrorMap.put(TSDBErrorNumbers.ERROR_SUBSCRIBE_FAILED, "failed to create subscription"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_UNSUPPORTED_ENCODING, "Unsupported encoding"); TSDBErrorMap.put(TSDBErrorNumbers.ERROR_JNI_TDENGINE_ERROR, "internal error of database"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java index c978bb3a2e4a1b9dbd264268f057f2b6c735250a..3c44d69be58c5b124493367e3d2efb8c7d835e53 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java @@ -1,6 +1,7 @@ package com.taosdata.jdbc; import java.util.HashSet; +import java.util.Set; public class TSDBErrorNumbers { @@ -24,7 +25,10 @@ public class TSDBErrorNumbers { public static final int ERROR_URL_NOT_SET = 0x2312; // url is not set public static final int ERROR_INVALID_SQL = 0x2313; // invalid sql public static final int ERROR_NUMERIC_VALUE_OUT_OF_RANGE = 0x2314; // numeric value out of range - public static final int ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE = 0x2315; //unknown taos type in tdengine + public static final int ERROR_UNKNOWN_TAOS_TYPE = 0x2315; //unknown taos type in tdengine + public static final int ERROR_UNKNOWN_TIMESTAMP_PRECISION = 0x2316; // unknown timestamp precision + public static final int ERROR_RESTFul_Client_Protocol_Exception = 0x2317; + public static final int ERROR_RESTFul_Client_IOException = 0x2318; public static final int ERROR_UNKNOWN = 0x2350; //unknown error @@ -38,10 +42,9 @@ public class TSDBErrorNumbers { public static final int ERROR_JNI_FETCH_END = 0x2358; // fetch to the end of resultSet public static final int ERROR_JNI_OUT_OF_MEMORY = 0x2359; // JNI alloc memory failed - private static final HashSet errorNumbers; + private static final Set errorNumbers = new HashSet<>(); static { - errorNumbers = new HashSet(); errorNumbers.add(ERROR_CONNECTION_CLOSED); errorNumbers.add(ERROR_UNSUPPORTED_METHOD); errorNumbers.add(ERROR_INVALID_VARIABLE); @@ -61,9 +64,12 @@ public class TSDBErrorNumbers { errorNumbers.add(ERROR_URL_NOT_SET); errorNumbers.add(ERROR_INVALID_SQL); errorNumbers.add(ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE_IN_TDENGINE); + errorNumbers.add(ERROR_UNKNOWN_TAOS_TYPE); + errorNumbers.add(ERROR_UNKNOWN_TIMESTAMP_PRECISION); + errorNumbers.add(ERROR_RESTFul_Client_IOException); + + errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception); - /*****************************************************/ errorNumbers.add(ERROR_SUBSCRIBE_FAILED); errorNumbers.add(ERROR_UNSUPPORTED_ENCODING); errorNumbers.add(ERROR_JNI_TDENGINE_ERROR); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index bc4d58785a1cfd15ba236c3e2f6e355f209ec916..051eca7e10ad18daea6a7b1ad55f148b786e0798 100755 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -29,14 +29,9 @@ import java.util.List; public class TSDBJNIConnector { private static volatile Boolean isInitialized = false; - private TaosInfo taosInfo = TaosInfo.getInstance(); - - // Connection pointer used in C - private long taos = TSDBConstants.JNI_NULL_POINTER; - - // result set status in current connection - private boolean isResultsetClosed; - + private final TaosInfo taosInfo = TaosInfo.getInstance(); + private long taos = TSDBConstants.JNI_NULL_POINTER; // Connection pointer used in C + private boolean isResultsetClosed; // result set status in current connection private int affectedRows = -1; static { @@ -96,11 +91,9 @@ public class TSDBJNIConnector { /** * Execute DML/DDL operation - * - * @throws SQLException */ public long executeQuery(String sql) throws SQLException { - Long pSql = 0l; + long pSql = 0L; try { pSql = this.executeQueryImp(sql.getBytes(TaosGlobalConfig.getCharset()), this.taos); taosInfo.stmt_count_increment(); @@ -161,7 +154,7 @@ public class TSDBJNIConnector { private native long getResultSetImp(long connection, long pSql); public boolean isUpdateQuery(long pSql) { - return isUpdateQueryImp(this.taos, pSql) == 1 ? true : false; + return isUpdateQueryImp(this.taos, pSql) == 1; } private native long isUpdateQueryImp(long connection, long pSql); @@ -195,7 +188,7 @@ public class TSDBJNIConnector { */ public int getSchemaMetaData(long resultSet, List columnMetaData) { int ret = this.getSchemaMetaDataImp(this.taos, resultSet, columnMetaData); - columnMetaData.stream().forEach(column -> column.setColIndex(column.getColIndex() + 1)); + columnMetaData.forEach(column -> column.setColIndex(column.getColIndex() + 1)); return ret; } @@ -218,8 +211,9 @@ public class TSDBJNIConnector { /** * Get Result Time Precision. + * * @return 0: ms, 1: us, 2: ns - */ + */ public int getResultTimePrecision(long sqlObj) { return this.getResultTimePrecisionImp(this.taos, sqlObj); } @@ -228,8 +222,6 @@ public class TSDBJNIConnector { /** * Execute close operation from C to release connection pointer by JNI - * - * @throws SQLException */ public void closeConnection() throws SQLException { int code = this.closeConnectionImp(this.taos); @@ -268,8 +260,6 @@ public class TSDBJNIConnector { /** * Unsubscribe, close a subscription - * - * @param subscription */ void unsubscribe(long subscription, boolean isKeep) { unsubscribeImp(subscription, isKeep); @@ -282,13 +272,13 @@ public class TSDBJNIConnector { */ public boolean validateCreateTableSql(String sql) { int res = validateCreateTableSqlImp(taos, sql.getBytes()); - return res != 0 ? 
false : true; + return res == 0; } private native int validateCreateTableSqlImp(long connection, byte[] sqlBytes); public long prepareStmt(String sql) throws SQLException { - Long stmt; + long stmt; try { stmt = prepareStmtImp(sql.getBytes(), this.taos); } catch (Exception e) { @@ -358,4 +348,13 @@ public class TSDBJNIConnector { } private native int closeStmt(long stmt, long con); + + public void insertLines(String[] lines) throws SQLException { + int code = insertLinesImp(lines, this.taos); + if (code != TSDBConstants.JNI_SUCCESS) { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to insertLines"); + } + } + + private native int insertLinesImp(String[] lines, long conn); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index c3d5abf35c7304f86f9cc4dc4449d8c88d144e3d..22fb0c4ae4987ade0a406fe5628bf80d975f3ae5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -38,7 +38,6 @@ import java.util.regex.Pattern; public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { private String rawSql; private Object[] parameters; - private boolean isPrepared; private ArrayList colData; private ArrayList tableTags; @@ -47,8 +46,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat private String tableName; private long nativeStmtHandle = 0; - private volatile TSDBParameterMetaData parameterMetaData; - TSDBPreparedStatement(TSDBConnection connection, String sql) { super(connection); init(sql); @@ -60,14 +57,13 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat parameterCnt++; } } - parameters = new Object[parameterCnt]; - this.isPrepared = true; } + parameters = new Object[parameterCnt]; if (parameterCnt > 1) { // the table name is also a parameter, so ignore it. - this.colData = new ArrayList(); - this.tableTags = new ArrayList(); + this.colData = new ArrayList<>(); + this.tableTags = new ArrayList<>(); } } @@ -76,18 +72,17 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat preprocessSql(); } - @Override - public int[] executeBatch() throws SQLException { - return super.executeBatch(); - } - /* + * + */ + + /** * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by * the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in * order to process those supported SQLs. 
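The new insertLines JNI entry point shown earlier in this hunk takes an array of line-protocol strings and throws a SQLException when the native call reports an error. The required line syntax is not part of this diff, so the payload below is only a placeholder that illustrates the call shape on an already-connected TSDBJNIConnector:

```java
import java.sql.SQLException;

import com.taosdata.jdbc.TSDBJNIConnector;

public class InsertLinesSketch {
    // `connector` is assumed to be an already-connected TSDBJNIConnector;
    // the payload string is a placeholder, not a verified line-protocol example.
    static void writeLines(TSDBJNIConnector connector) throws SQLException {
        String[] lines = {
                "<measurement>,<tag_set> <field_set> <timestamp>"
        };
        connector.insertLines(lines); // raises "failed to insertLines" on a non-zero JNI code
    }
}
```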
*/ private void preprocessSql() { - /***** For processing some of Spark SQLs*****/ + /***For processing some of Spark SQLs*/ // should replace it first this.rawSql = this.rawSql.replaceAll("or (.*) is null", ""); this.rawSql = this.rawSql.replaceAll(" where ", " WHERE "); @@ -134,32 +129,17 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } rawSql = rawSql.replace(matcher.group(1), tableFullName); } - /***** for inner queries *****/ - } - - /** - * Populate parameters into prepared sql statements - * - * @return a string of the native sql statement for TSDB - */ - private String getNativeSql(String rawSql) throws SQLException { - return Utils.getNativeSql(rawSql, this.parameters); } @Override public ResultSet executeQuery() throws SQLException { - if (!isPrepared) - return executeQuery(this.rawSql); - - final String sql = getNativeSql(this.rawSql); + final String sql = Utils.getNativeSql(this.rawSql, this.parameters); return executeQuery(sql); } @Override public int executeUpdate() throws SQLException { - if (!isPrepared) - return executeUpdate(this.rawSql); - String sql = getNativeSql(this.rawSql); + String sql = Utils.getNativeSql(this.rawSql, this.parameters); return executeUpdate(sql); } @@ -282,25 +262,14 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public boolean execute() throws SQLException { - if (!isPrepared) - return execute(this.rawSql); - - final String sql = getNativeSql(this.rawSql); + final String sql = Utils.getNativeSql(this.rawSql, this.parameters); return execute(sql); } @Override public void addBatch() throws SQLException { - if (this.batchedArgs == null) { - batchedArgs = new ArrayList<>(); - } - - if (!isPrepared) { - addBatch(this.rawSql); - } else { - String sql = this.getConnection().nativeSQL(this.rawSql); - addBatch(sql); - } + String sql = Utils.getNativeSql(this.rawSql, this.parameters); + addBatch(sql); } @Override @@ -553,12 +522,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } } - ; - private static class TableTagInfo { private boolean isNull; - private Object value; - private int type; + private final Object value; + private final int type; public TableTagInfo(Object value, int type) { this.value = value; @@ -572,8 +539,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } } - ; - public void setTableName(String name) { this.tableName = name; } @@ -661,7 +626,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat this.colData.addAll(Collections.nCopies(this.parameters.length - 1 - this.tableTags.size(), null)); } - ColumnInfo col = (ColumnInfo) this.colData.get(columnIndex); + ColumnInfo col = this.colData.get(columnIndex); if (col == null) { ColumnInfo p = new ColumnInfo(); p.setType(type); @@ -752,8 +717,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat ByteBuffer isNullList = ByteBuffer.allocate(num * Integer.BYTES); isNullList.order(ByteOrder.LITTLE_ENDIAN); - for (int i = 0; i < num; ++i) { - TableTagInfo tag = this.tableTags.get(i); + for (TableTagInfo tag : this.tableTags) { if (tag.isNull) { typeList.put((byte) tag.type); isNullList.putInt(1); @@ -852,7 +816,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat typeList, lengthList, isNullList); } - ColumnInfo colInfo = (ColumnInfo) this.colData.get(0); + ColumnInfo colInfo = this.colData.get(0); if (colInfo == null) { throw 
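With this change every execution path of TSDBPreparedStatement (executeQuery, executeUpdate, execute, addBatch) resolves its ? placeholders through Utils.getNativeSql before the text reaches the native layer, so standard PreparedStatement usage is unaffected. A sketch with placeholder connection details and an assumed `weather` table:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Timestamp;

public class PreparedStatementExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata")) {
            // the ? placeholders are substituted on the Java side before execution
            try (PreparedStatement insert = conn.prepareStatement("insert into weather values (?, ?)")) {
                insert.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
                insert.setFloat(2, 23.5f);
                insert.executeUpdate();
            }
            try (PreparedStatement query = conn.prepareStatement("select * from weather where temperature > ?")) {
                query.setFloat(1, 20.0f);
                try (ResultSet rs = query.executeQuery()) {
                    while (rs.next()) {
                        System.out.println(rs.getTimestamp(1) + " " + rs.getFloat(2));
                    }
                }
            }
        }
    }
}
```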
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "column data not bind"); } @@ -988,7 +952,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "not support data types"); } } - ; connector.bindColumnDataArray(this.nativeStmtHandle, colDataList, lengthList, isNullList, col1.type, col1.bytes, rows, i); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index 59a64ad520f01c8c7f2b85f95057365e2410ecb6..00a62206fc7861a87177d14cc4b274c464dc4184 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -339,11 +339,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: case TSDBConstants.TSDB_DATA_TYPE_INT: case TSDBConstants.TSDB_DATA_TYPE_BIGINT: - res = new BigDecimal(Long.valueOf(this.rowData.getObject(columnIndex).toString())); + res = new BigDecimal(Long.parseLong(this.rowData.getObject(columnIndex).toString())); break; case TSDBConstants.TSDB_DATA_TYPE_FLOAT: case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: - res = new BigDecimal(Double.valueOf(this.rowData.getObject(columnIndex).toString())); + res = BigDecimal.valueOf(Double.parseDouble(this.rowData.getObject(columnIndex).toString())); break; case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: return new BigDecimal(((Timestamp) this.rowData.getObject(columnIndex)).getTime()); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java index 7b3be5d26397eae704d98f1e1802af14abaad4fc..6211f61dc505d2ccba5f11f3aacc980771b1a110 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetBlockData.java @@ -36,23 +36,20 @@ public class TSDBResultSetBlockData { private int rowIndex = 0; private List columnMetaDataList; - private ArrayList colData = null; + private ArrayList colData; public TSDBResultSetBlockData(List colMeta, int numOfCols) { this.columnMetaDataList = colMeta; - this.colData = new ArrayList(numOfCols); + this.colData = new ArrayList<>(numOfCols); } public TSDBResultSetBlockData() { - this.colData = new ArrayList(); + this.colData = new ArrayList<>(); } public void clear() { int size = this.colData.size(); - if (this.colData != null) { - this.colData.clear(); - } - + this.colData.clear(); setNumOfCols(size); } @@ -69,7 +66,7 @@ public class TSDBResultSetBlockData { } public void setNumOfCols(int numOfCols) { - this.colData = new ArrayList(numOfCols); + this.colData = new ArrayList<>(numOfCols); this.colData.addAll(Collections.nCopies(numOfCols, null)); } @@ -166,15 +163,10 @@ public class TSDBResultSetBlockData { } } - /** * The original type may not be a string type, but will be converted to by * calling this method - * - * @param col column index - * @return - * @throws SQLException */ public String getString(int col) throws SQLException { Object obj = get(col); @@ -387,7 +379,7 @@ public class TSDBResultSetBlockData { return null; } - return (long) val; + return val; } case TSDBConstants.TSDB_DATA_TYPE_FLOAT: { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java 
b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java index 48d42473926b638401cf5d9dd97466695ba452ab..6292673352529171cdc42ba73e0f47f8f05a21a4 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java @@ -41,10 +41,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD } public boolean isSearchable(int column) throws SQLException { - if (column == 1) { - return true; - } - return false; + return column == 1; } public boolean isCurrency(int column) throws SQLException { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index b0b3ae4180ea5df185f6f54e5c0c99fd49493d21..2ff0d86c920aa0aae67f71448bf9112564293350 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -26,7 +26,7 @@ import java.util.Collections; public class TSDBResultSetRowData { private ArrayList data; - private int colSize; + private final int colSize; public TSDBResultSetRowData(int colSize) { this.colSize = colSize; @@ -147,30 +147,14 @@ public class TSDBResultSetRowData { case TSDBConstants.TSDB_DATA_TYPE_NCHAR: case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Integer.parseInt((String) obj); - case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: { - Byte value = (byte) obj; - if (value < 0) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - return value; - } - case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: { - short value = (short) obj; - if (value < 0) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - return value; - } - case TSDBConstants.TSDB_DATA_TYPE_UINT: { - int value = (int) obj; - if (value < 0) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - return value; - } - case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: { - long value = (long) obj; - if (value < 0) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); - return Long.valueOf(value).intValue(); - } + case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: + return parseUnsignedTinyIntToInt(obj); + case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: + return parseUnsignedSmallIntToInt(obj); + case TSDBConstants.TSDB_DATA_TYPE_UINT: + return parseUnsignedIntegerToInt(obj); + case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: + return parseUnsignedBigIntToInt(obj); case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return ((Float) obj).intValue(); case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: @@ -180,6 +164,35 @@ public class TSDBResultSetRowData { } } + private byte parseUnsignedTinyIntToInt(Object obj) throws SQLException { + byte value = (byte) obj; + if (value < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); + return value; + } + + private short parseUnsignedSmallIntToInt(Object obj) throws SQLException { + short value = (short) obj; + if (value < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); + return value; + } + + private int parseUnsignedIntegerToInt(Object obj) throws SQLException { + int value = (int) obj; + if (value < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); + return value; + } + + private int 
parseUnsignedBigIntToInt(Object obj) throws SQLException { + long value = (long) obj; + if (value < 0) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); + return Long.valueOf(value).intValue(); + } + + /** * $$$ this method is invoked by databaseMetaDataResultSet and so on which use a index start from 1 in JDBC api */ @@ -216,7 +229,7 @@ public class TSDBResultSetRowData { case TSDBConstants.TSDB_DATA_TYPE_BINARY: return Long.parseLong((String) obj); case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: { - Byte value = (byte) obj; + byte value = (byte) obj; if (value < 0) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_NUMERIC_VALUE_OUT_OF_RANGE); return value; @@ -377,27 +390,27 @@ public class TSDBResultSetRowData { switch (nativeType) { case TSDBConstants.TSDB_DATA_TYPE_UTINYINT: { - Byte value = new Byte(String.valueOf(obj)); + byte value = new Byte(String.valueOf(obj)); if (value >= 0) - return value.toString(); + return Byte.toString(value); return Integer.toString(value & 0xff); } case TSDBConstants.TSDB_DATA_TYPE_USMALLINT: { - Short value = new Short(String.valueOf(obj)); + short value = new Short(String.valueOf(obj)); if (value >= 0) - return value.toString(); + return Short.toString(value); return Integer.toString(value & 0xffff); } case TSDBConstants.TSDB_DATA_TYPE_UINT: { - Integer value = new Integer(String.valueOf(obj)); + int value = new Integer(String.valueOf(obj)); if (value >= 0) - return value.toString(); - return Long.toString(value & 0xffffffffl); + return Integer.toString(value); + return Long.toString(value & 0xffffffffL); } case TSDBConstants.TSDB_DATA_TYPE_UBIGINT: { - Long value = new Long(String.valueOf(obj)); + long value = new Long(String.valueOf(obj)); if (value >= 0) - return value.toString(); + return Long.toString(value); long lowValue = value & 0x7fffffffffffffffL; return BigDecimal.valueOf(lowValue).add(BigDecimal.valueOf(Long.MAX_VALUE)).add(BigDecimal.valueOf(1)).toString(); } @@ -419,25 +432,26 @@ public class TSDBResultSetRowData { /** * !!! 
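The unsigned helpers above rely on ordinary two's-complement masking: a negative signed value is widened and masked to recover its unsigned decimal form, and UBIGINT values beyond Long.MAX_VALUE are rebuilt with BigDecimal exactly as in the getString branch. The same arithmetic in isolation, with no TDengine types involved:

```java
import java.math.BigDecimal;

public class UnsignedMaskingDemo {
    public static void main(String[] args) {
        byte utiny = (byte) 0xFE;                 // signed -2, unsigned 254
        System.out.println(utiny & 0xff);         // 254

        int uint = 0xFFFFFFFE;                    // signed -2, unsigned 4294967294
        System.out.println(uint & 0xffffffffL);   // 4294967294

        long ubig = 0xFFFFFFFFFFFFFFFEL;          // signed -2, unsigned 2^64 - 2
        long lowValue = ubig & 0x7fffffffffffffffL;
        BigDecimal unsigned = BigDecimal.valueOf(lowValue)
                .add(BigDecimal.valueOf(Long.MAX_VALUE))
                .add(BigDecimal.valueOf(1));
        System.out.println(unsigned);             // 18446744073709551614
    }
}
```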
this method is invoked by JNI method and the index start from 0 in C implementations + * * @param precision 0 : ms, 1 : us, 2 : ns */ public void setTimestamp(int col, long ts, int precision) { - long milliseconds = 0; - int fracNanoseconds = 0; + long milliseconds; + int fracNanoseconds; switch (precision) { case 0: { milliseconds = ts; - fracNanoseconds = (int)(ts*1_000_000%1_000_000_000); + fracNanoseconds = (int) (ts * 1_000_000 % 1_000_000_000); break; } case 1: { - milliseconds = ts/1_000; - fracNanoseconds = (int)(ts*1_000%1_000_000_000); + milliseconds = ts / 1_000; + fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000); break; } case 2: { - milliseconds = ts/1_000_000; - fracNanoseconds = (int)(ts%1_000_000_000); + milliseconds = ts / 1_000_000; + fracNanoseconds = (int) (ts % 1_000_000_000); break; } default: { @@ -450,16 +464,33 @@ public class TSDBResultSetRowData { data.set(col, tsObj); } + /** + * this implementation is used for TDengine old version + */ + public void setTimestamp(int col, long ts) { + //TODO: this implementation contains logical error + // when precision is us the (long ts) is 16 digital number + // when precision is ms, the (long ts) is 13 digital number + // we need a JNI function like this: + // public void setTimestamp(int col, long epochSecond, long nanoAdjustment) + if (ts < 1_0000_0000_0000_0L) { + data.set(col, new Timestamp(ts)); + } else { + long epochSec = ts / 1000_000L; + long nanoAdjustment = ts % 1000_000L * 1000L; + Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + data.set(col, timestamp); + } + } + public Timestamp getTimestamp(int col, int nativeType) { Object obj = data.get(col - 1); if (obj == null) return null; - switch (nativeType) { - case TSDBConstants.TSDB_DATA_TYPE_BIGINT: - return new Timestamp((Long) obj); - default: - return (Timestamp) obj; + if (nativeType == TSDBConstants.TSDB_DATA_TYPE_BIGINT) { + return new Timestamp((Long) obj); } + return (Timestamp) obj; } public Object getObject(int col) { diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java index c5fd497ca3dfed8bc8555110660ff70a9fd23447..d74f7755b24f1eb59059fb5ad5ead48d420ec7f6 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBSubscribe.java @@ -47,9 +47,6 @@ public class TSDBSubscribe { /** * close subscription - * - * @param keepProgress - * @throws SQLException */ public void close(boolean keepProgress) throws SQLException { if (this.connecter.isClosed()) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampFormat.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampFormat.java new file mode 100644 index 0000000000000000000000000000000000000000..5ff0774639fb20c575d46c41d977b4ce3a16de10 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampFormat.java @@ -0,0 +1,7 @@ +package com.taosdata.jdbc.enums; + +public enum TimestampFormat { + STRING, + TIMESTAMP, + UTC +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java new file mode 100644 index 0000000000000000000000000000000000000000..79350076c7f4b31743ab9fb61226e506186f0f17 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/TimestampPrecision.java @@ -0,0 +1,8 @@ 
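The precision-aware setTimestamp above splits the raw epoch value into a millisecond part plus a nanosecond fraction according to the precision flag (0 = ms, 1 = us, 2 = ns). The arithmetic for a microsecond-precision value is shown in isolation below; combining the two parts via Timestamp.setNanos is only an illustration, since the construction of the stored object lies outside the visible hunk:

```java
import java.sql.Timestamp;

public class TimestampPrecisionDemo {
    public static void main(String[] args) {
        long ts = 1_626_006_833_639_123L;                         // epoch value in microseconds (precision == 1)
        long milliseconds = ts / 1_000;                           // 1626006833639
        int fracNanoseconds = (int) (ts * 1_000 % 1_000_000_000); // 639123000

        Timestamp t = new Timestamp(milliseconds);
        t.setNanos(fracNanoseconds);
        System.out.println(t);                                    // ...:53.639123 in the local time zone
    }
}
```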
+package com.taosdata.jdbc.enums; + +public enum TimestampPrecision { + MS, + US, + NS, + UNKNOWN +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index d6a02b7e3a7d6ef2d7527cd101743cc4575b43ba..12a0ab57e2c35c7f1f550dd213db19a0effd4ebc 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -16,7 +16,7 @@ public class RestfulConnection extends AbstractConnection { private final String host; private final int port; private final String url; - private volatile String database; + private final String database; private final String token; /******************************************************/ private boolean isClosed; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java index f58e3f8cd2406e6372900c2f7b2547a450fb7fe9..f2abbd24454ff13f708636b61bcf93be2e0d47b5 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java @@ -13,7 +13,7 @@ import java.util.Calendar; public class RestfulPreparedStatement extends RestfulStatement implements PreparedStatement { - private ParameterMetaData parameterMetaData; + private final ParameterMetaData parameterMetaData; private final String rawSql; private Object[] parameters; private boolean isPrepared; @@ -22,16 +22,16 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar super(conn, database); this.rawSql = sql; + int parameterCnt = 0; if (sql.contains("?")) { - int parameterCnt = 0; for (int i = 0; i < sql.length(); i++) { if ('?' == sql.charAt(i)) { parameterCnt++; } } - parameters = new Object[parameterCnt]; this.isPrepared = true; } + parameters = new Object[parameterCnt]; // build parameterMetaData this.parameterMetaData = new RestfulParameterMetaData(parameters); @@ -44,7 +44,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar if (!isPrepared) return executeQuery(this.rawSql); - final String sql = getNativeSql(this.rawSql); + final String sql = Utils.getNativeSql(this.rawSql, this.parameters); return executeQuery(sql); } @@ -55,20 +55,10 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar if (!isPrepared) return executeUpdate(this.rawSql); - final String sql = getNativeSql(this.rawSql); + final String sql = Utils.getNativeSql(rawSql, this.parameters); return executeUpdate(sql); } - /**** - * 将rawSql转换成一条可执行的sql语句,使用属性parameters中的变脸进行替换 - * 对于insert into ?.? (?,?,?) using ?.? (?,?,?) tags(?, ?, ?) values(?, ?, ?) 
- * @param rawSql,可能是insert、select或其他,使用?做占位符 - * @return - */ - private String getNativeSql(String rawSql) { - return Utils.getNativeSql(rawSql, this.parameters); - } - @Override public void setNull(int parameterIndex, int sqlType) throws SQLException { if (isClosed()) @@ -224,16 +214,13 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); if (!isPrepared) return execute(this.rawSql); - final String sql = getNativeSql(rawSql); + final String sql = Utils.getNativeSql(rawSql, this.parameters); return execute(sql); } @Override public void addBatch() throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - - final String sql = getNativeSql(this.rawSql); + final String sql = Utils.getNativeSql(rawSql, this.parameters); addBatch(sql); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 530b433d42bf4b387db0e9973ddcf48e3844f2f3..1ea39236b666fda106c3ee3534560b6380d7bec9 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -6,6 +6,8 @@ import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; import com.google.common.primitives.Shorts; import com.taosdata.jdbc.*; +import com.taosdata.jdbc.enums.TimestampPrecision; +import com.taosdata.jdbc.enums.TimestampFormat; import com.taosdata.jdbc.utils.Utils; import java.math.BigDecimal; @@ -15,19 +17,20 @@ import java.time.ZoneOffset; import java.time.format.DateTimeParseException; import java.util.ArrayList; import java.util.Calendar; +import java.util.List; public class RestfulResultSet extends AbstractResultSet implements ResultSet { - private volatile boolean isClosed; - private int pos = -1; - private final String database; private final Statement statement; // data - private final ArrayList> resultSet = new ArrayList<>(); + private final List> resultSet = new ArrayList<>(); // meta - private ArrayList columnNames = new ArrayList<>(); - private ArrayList columns = new ArrayList<>(); - private RestfulResultSetMetaData metaData; + private final List columnNames = new ArrayList<>(); + private final List columns = new ArrayList<>(); + private final RestfulResultSetMetaData metaData; + + private volatile boolean isClosed; + private int pos = -1; /** * 由一个result的Json构造结果集,对应执行show databases, show tables等这些语句,返回结果集,但无法获取结果集对应的meta,统一当成String处理 @@ -35,34 +38,30 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { * @param resultJson: 包含data信息的结果集,有sql返回的结果集 ***/ public RestfulResultSet(String database, Statement statement, JSONObject resultJson) throws SQLException { - this.database = database; this.statement = statement; + // get head + JSONArray head = resultJson.getJSONArray("head"); // get column metadata JSONArray columnMeta = resultJson.getJSONArray("column_meta"); // get row data JSONArray data = resultJson.getJSONArray("data"); - if (data == null || data.isEmpty()) { - columnNames.clear(); - columns.clear(); - this.resultSet.clear(); - return; - } - // get head - JSONArray head = resultJson.getJSONArray("head"); // get rows Integer rows = resultJson.getInteger("rows"); + // parse column_meta if (columnMeta != null) { parseColumnMeta_new(columnMeta); } else { parseColumnMeta_old(head, data, 
rows); } - this.metaData = new RestfulResultSetMetaData(this.database, columns, this); + this.metaData = new RestfulResultSetMetaData(database, columns, this); + + if (data == null || data.isEmpty()) + return; // parse row data - resultSet.clear(); for (int rowIndex = 0; rowIndex < data.size(); rowIndex++) { - ArrayList row = new ArrayList(); + List row = new ArrayList<>(); JSONArray jsonRow = data.getJSONArray(rowIndex); for (int colIndex = 0; colIndex < this.metaData.getColumnCount(); colIndex++) { row.add(parseColumnData(jsonRow, colIndex, columns.get(colIndex).taos_type)); @@ -131,7 +130,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } - private Object parseColumnData(JSONArray row, int colIndex, int taosType) throws SQLException { switch (taosType) { case TSDBConstants.TSDB_DATA_TYPE_NULL: @@ -150,44 +148,8 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return row.getFloat(colIndex); case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: return row.getDouble(colIndex); - case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: { - if (row.get(colIndex) == null) - return null; - String timestampFormat = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT); - if ("TIMESTAMP".equalsIgnoreCase(timestampFormat)) { - Long value = row.getLong(colIndex); - //TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9 - if (value < 1_0000_0000_0000_0L) - return new Timestamp(value); - long epochSec = value / 1000_000l; - long nanoAdjustment = value % 1000_000l * 1000l; - return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); - } - if ("UTC".equalsIgnoreCase(timestampFormat)) { - String value = row.getString(colIndex); - long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000; - int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5)); - long nanoAdjustment = 0; - if (value.length() > 28) { - // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00 - nanoAdjustment = fractionalSec * 1000l; - } else { - // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00 - nanoAdjustment = fractionalSec * 1000_000l; - } - ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5)); - Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant(); - return Timestamp.from(instant); - } - String value = row.getString(colIndex); - if (value.length() <= 23) // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS - return row.getTimestamp(colIndex); - // us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS - long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000; - long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000l; - Timestamp timestamp = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); - return timestamp; - } + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + return parseTimestampColumnData(row, colIndex); case TSDBConstants.TSDB_DATA_TYPE_BINARY: return row.getString(colIndex) == null ? 
null : row.getString(colIndex).getBytes(); case TSDBConstants.TSDB_DATA_TYPE_NCHAR: @@ -197,7 +159,66 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } - public class Field { + private Timestamp parseTimestampColumnData(JSONArray row, int colIndex) throws SQLException { + if (row.get(colIndex) == null) + return null; + String tsFormatUpperCase = this.statement.getConnection().getClientInfo(TSDBDriver.PROPERTY_KEY_TIMESTAMP_FORMAT).toUpperCase(); + TimestampFormat timestampFormat = TimestampFormat.valueOf(tsFormatUpperCase); + switch (timestampFormat) { + case TIMESTAMP: { + Long value = row.getLong(colIndex); + //TODO: this implementation has bug if the timestamp bigger than 9999_9999_9999_9 + if (value < 1_0000_0000_0000_0L) + return new Timestamp(value); + long epochSec = value / 1000_000L; + long nanoAdjustment = value % 1000_000L * 1000L; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + case UTC: { + String value = row.getString(colIndex); + long epochSec = Timestamp.valueOf(value.substring(0, 19).replace("T", " ")).getTime() / 1000; + int fractionalSec = Integer.parseInt(value.substring(20, value.length() - 5)); + long nanoAdjustment; + if (value.length() > 31) { + // ns timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSSSSS+0x00 + nanoAdjustment = fractionalSec; + } else if (value.length() > 28) { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSSSSS+0x00 + nanoAdjustment = fractionalSec * 1000L; + } else { + // ms timestamp: yyyy-MM-ddTHH:mm:ss.SSS+0x00 + nanoAdjustment = fractionalSec * 1000_000L; + } + ZoneOffset zoneOffset = ZoneOffset.of(value.substring(value.length() - 5)); + Instant instant = Instant.ofEpochSecond(epochSec, nanoAdjustment).atOffset(zoneOffset).toInstant(); + return Timestamp.from(instant); + } + case STRING: + default: { + String value = row.getString(colIndex); + TimestampPrecision precision = Utils.guessTimestampPrecision(value); + if (precision == TimestampPrecision.MS) { + // ms timestamp: yyyy-MM-dd HH:mm:ss.SSS + return row.getTimestamp(colIndex); + } + if (precision == TimestampPrecision.US) { + // us timestamp: yyyy-MM-dd HH:mm:ss.SSSSSS + long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000; + long nanoAdjustment = Integer.parseInt(value.substring(20)) * 1000L; + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + if (precision == TimestampPrecision.NS) { + // ms timestamp: yyyy-MM-dd HH:mm:ss.SSSSSSSSS + long epochSec = Timestamp.valueOf(value.substring(0, 19)).getTime() / 1000; + long nanoAdjustment = Integer.parseInt(value.substring(20)); + return Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN_TIMESTAMP_PRECISION); + } + } + } + + public static class Field { String name; int type; int length; @@ -211,6 +232,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { this.note = note; this.taos_type = taos_type; } + } @Override @@ -218,10 +240,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); pos++; - if (pos <= resultSet.size() - 1) { - return true; - } - return false; + return pos <= resultSet.size() - 1; } @Override @@ -231,13 +250,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { } } -// @Override -// public boolean wasNull() throws SQLException { -// if (isClosed()) -// throw 
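For the RESTful connection, the timestampFormat client property now decides how parseTimestampColumnData decodes a TIMESTAMP column: TIMESTAMP reads the JSON value as a raw epoch number, UTC parses the offset-suffixed string, and STRING (the default) guesses ms/us/ns precision from the literal. A usage sketch; the jdbc:TAOS-RS URL, port 6041, and table name are assumptions about a typical deployment:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Timestamp;
import java.util.Properties;

public class RestfulTimestampFormatExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("user", "root");
        props.setProperty("password", "taosdata");
        props.setProperty("timestampFormat", "UTC"); // STRING (default), TIMESTAMP or UTC

        try (Connection conn = DriverManager.getConnection("jdbc:TAOS-RS://127.0.0.1:6041/test", props);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select ts from weather limit 1")) {
            while (rs.next()) {
                Timestamp ts = rs.getTimestamp("ts"); // decoded according to timestampFormat
                System.out.println(ts);
            }
        }
    }
}
```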
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); -// return resultSet.isEmpty(); -// } - @Override public String getString(int columnIndex) throws SQLException { checkAvailability(columnIndex, resultSet.get(pos).size()); @@ -262,7 +274,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { wasNull = false; if (value instanceof Boolean) return (boolean) value; - return Boolean.valueOf(value.toString()); + return Boolean.parseBoolean(value.toString()); } @Override @@ -334,6 +346,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { wasNull = true; return 0; } + wasNull = false; if (value instanceof Timestamp) { return ((Timestamp) value).getTime(); @@ -416,9 +429,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return null; if (value instanceof Timestamp) return new Date(((Timestamp) value).getTime()); - Date date = null; - date = Utils.parseDate(value.toString()); - return date; + return Utils.parseDate(value.toString()); } @Override @@ -433,8 +444,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { Time time = null; try { time = Utils.parseTime(value.toString()); - } catch (DateTimeParseException e) { - time = null; + } catch (DateTimeParseException ignored) { } return time; } @@ -498,9 +508,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return null; if (value instanceof Long || value instanceof Integer || value instanceof Short || value instanceof Byte) - return new BigDecimal(Long.valueOf(value.toString())); + return new BigDecimal(Long.parseLong(value.toString())); if (value instanceof Double || value instanceof Float) - return new BigDecimal(Double.valueOf(value.toString())); + return BigDecimal.valueOf(Double.parseDouble(value.toString())); if (value instanceof Timestamp) return new BigDecimal(((Timestamp) value).getTime()); BigDecimal ret; @@ -610,36 +620,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); -// if (this.resultSet.size() == 0) -// return false; -// -// if (row == 0) { -// beforeFirst(); -// return false; -// } else if (row == 1) { -// return first(); -// } else if (row == -1) { -// return last(); -// } else if (row > this.resultSet.size()) { -// afterLast(); -// return false; -// } else { -// if (row < 0) { -// // adjust to reflect after end of result set -// int newRowPosition = this.resultSet.size() + row + 1; -// if (newRowPosition <= 0) { -// beforeFirst(); -// return false; -// } else { -// return absolute(newRowPosition); -// } -// } else { -// row--; // adjust for index difference -// this.pos = row; -// return true; -// } -// } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -683,5 +663,4 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return isClosed; } - -} +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java index 7ead8bd1bbaeeb8fe1f24c951656888f6bba6b6f..148f77974fcec9a997ade815f56b12564e537f58 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSetMetaData.java @@ -7,21 +7,20 @@ import 
java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Timestamp; import java.sql.Types; -import java.util.ArrayList; +import java.util.Collections; +import java.util.List; public class RestfulResultSetMetaData extends WrapperImpl implements ResultSetMetaData { private final String database; - private ArrayList fields; - private final RestfulResultSet resultSet; + private final List fields; - public RestfulResultSetMetaData(String database, ArrayList fields, RestfulResultSet resultSet) { + public RestfulResultSetMetaData(String database, List fields, RestfulResultSet resultSet) { this.database = database; - this.fields = fields; - this.resultSet = resultSet; + this.fields = fields == null ? Collections.emptyList() : fields; } - public ArrayList getFields() { + public List getFields() { return fields; } @@ -139,8 +138,8 @@ public class RestfulResultSetMetaData extends WrapperImpl implements ResultSetMe @Override public String getColumnTypeName(int column) throws SQLException { - int taos_type = fields.get(column - 1).taos_type; - return TSDBConstants.taosType2JdbcTypeName(taos_type); + int taosType = fields.get(column - 1).taos_type; + return TSDBConstants.taosType2JdbcTypeName(taosType); } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java index e9d193f6b412f6ab835d39f97a229f137e48cacf..f8acd8f06180476a09519c0809dd493d062c911c 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java @@ -35,10 +35,6 @@ public class RestfulStatement extends AbstractStatement { if (!SqlSyntaxValidator.isValidForExecuteQuery(sql)) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_QUERY, "not a valid sql for executeQuery: " + sql); - if (SqlSyntaxValidator.isDatabaseUnspecifiedQuery(sql)) { - return executeOneQuery(sql); - } - return executeOneQuery(sql); } @@ -50,9 +46,6 @@ public class RestfulStatement extends AbstractStatement { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_FOR_EXECUTE_UPDATE, "not a valid sql for executeUpdate: " + sql); final String url = "http://" + conn.getHost() + ":" + conn.getPort() + "/rest/sql"; - if (SqlSyntaxValidator.isDatabaseUnspecifiedUpdate(sql)) { - return executeOneUpdate(url, sql); - } return executeOneUpdate(url, sql); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java index b1c6dae6b3543337ae253c4cf94c2aaa5a31f9f5..de26ab7f1f458a4587ce15bebab3c2c1b0dbc070 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java @@ -1,8 +1,12 @@ package com.taosdata.jdbc.utils; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; import org.apache.http.HeaderElement; import org.apache.http.HeaderElementIterator; import org.apache.http.HttpEntity; +import org.apache.http.client.ClientProtocolException; +import org.apache.http.client.HttpRequestRetryHandler; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.*; import org.apache.http.client.protocol.HttpClientContext; @@ -16,33 +20,24 @@ import org.apache.http.protocol.HTTP; import org.apache.http.protocol.HttpContext; import 
org.apache.http.util.EntityUtils; -import java.nio.charset.Charset; - +import javax.net.ssl.SSLException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; +import java.sql.SQLException; public class HttpClientPoolUtil { private static final String DEFAULT_CONTENT_TYPE = "application/json"; - private static final String DEFAULT_TOKEN = "cm9vdDp0YW9zZGF0YQ=="; + private static final int DEFAULT_MAX_TOTAL = 200; + private static final int DEFAULT_MAX_PER_ROUTE = 20; private static final int DEFAULT_TIME_OUT = 15000; - private static final int DEFAULT_MAX_PER_ROUTE = 32; - private static final int DEFAULT_MAX_TOTAL = 1000; private static final int DEFAULT_HTTP_KEEP_TIME = 15000; + private static final int DEFAULT_MAX_RETRY_COUNT = 5; - private static PoolingHttpClientConnectionManager connectionManager; - private static CloseableHttpClient httpClient; - - private static synchronized void initPools() { - if (httpClient == null) { - connectionManager = new PoolingHttpClientConnectionManager(); - connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); - connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); - httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).build(); - } - } - - private static ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> { + private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> { HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); - int keepTime = DEFAULT_HTTP_KEEP_TIME * 1000; while (it.hasNext()) { HeaderElement headerElement = it.nextElement(); String param = headerElement.getName(); @@ -50,81 +45,105 @@ public class HttpClientPoolUtil { if (value != null && param.equalsIgnoreCase("timeout")) { try { return Long.parseLong(value) * 1000; - } catch (Exception e) { - new Exception("format KeepAlive timeout exception, exception:" + e.toString()).printStackTrace(); + } catch (NumberFormatException ignore) { } } } - return keepTime; + return DEFAULT_HTTP_KEEP_TIME * 1000; + }; + + private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> { + if (executionCount >= DEFAULT_MAX_RETRY_COUNT) + // do not retry if over max retry count + return false; + if (exception instanceof InterruptedIOException) + // timeout + return false; + if (exception instanceof UnknownHostException) + // unknown host + return false; + if (exception instanceof SSLException) + // SSL handshake exception + return false; + return true; }; - /** - * 执行http post请求 - * 默认采用Content-Type:application/json,Accept:application/json - * - * @param uri 请求地址 - * @param data 请求数据 - * @return responseBody - */ - public static String execute(String uri, String data, String token) { - long startTime = System.currentTimeMillis(); + private static CloseableHttpClient httpClient; + + static { + PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); + connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); + connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); + httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build(); + } + + /*** execute GET request ***/ + public static String execute(String uri) throws SQLException { 
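// Editor's note: illustrative usage sketch, not part of this patch. The GET overload that starts
// here takes a plain URL; the POST overload further down in this hunk is what the RESTful driver
// uses to send SQL, passing a basic-auth token that the method places in an "Authorization: Taosd"
// header. Host, port and credentials below are hypothetical; the token shown is the Base64 of
// "root:taosdata", i.e. the value of the DEFAULT_TOKEN constant removed above.
class RestfulExecuteSketch {
    static String showDatabases() throws java.sql.SQLException {
        String url = "http://127.0.0.1:6041/rest/sql";            // assumed REST endpoint
        String token = java.util.Base64.getEncoder()
                .encodeToString("root:taosdata".getBytes(java.nio.charset.StandardCharsets.UTF_8));
        return HttpClientPoolUtil.execute(url, "show databases;", token);
    }
}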
HttpEntity httpEntity = null; - HttpEntityEnclosingRequestBase method = null; String responseBody = ""; try { - if (httpClient == null) { - initPools(); + HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); + HttpContext context = HttpClientContext.create(); + CloseableHttpResponse httpResponse = httpClient.execute(method, context); + httpEntity = httpResponse.getEntity(); + if (httpEntity != null) { + responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } - method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0); - method.setHeader("Content-Type", "text/plain"); - method.setHeader("Connection", "keep-alive"); + } catch (ClientProtocolException e) { + e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); + } catch (IOException exception) { + exception.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); + } finally { + if (httpEntity != null) { + EntityUtils.consumeQuietly(httpEntity); + } + } + return responseBody; + } + + + /*** execute POST request ***/ + public static String execute(String uri, String data, String token) throws SQLException { + HttpEntity httpEntity = null; + String responseBody = ""; + try { + HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME); + method.setHeader(HTTP.CONTENT_TYPE, "text/plain"); + method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE); method.setHeader("Authorization", "Taosd " + token); - method.setEntity(new StringEntity(data, Charset.forName("UTF-8"))); + method.setEntity(new StringEntity(data, StandardCharsets.UTF_8)); HttpContext context = HttpClientContext.create(); CloseableHttpResponse httpResponse = httpClient.execute(method, context); httpEntity = httpResponse.getEntity(); if (httpEntity != null) { - responseBody = EntityUtils.toString(httpEntity, "UTF-8"); + responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } - } catch (Exception e) { - if (method != null) { - method.abort(); - } - new Exception("execute post request exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace(); + } catch (ClientProtocolException e) { + e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); + } catch (IOException exception) { + exception.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); } finally { if (httpEntity != null) { - try { - EntityUtils.consumeQuietly(httpEntity); - } catch (Exception e) { - new Exception("close response exception, url:" + uri + ", exception:" + e.toString() + ", cost time(ms):" + (System.currentTimeMillis() - startTime)).printStackTrace(); - } + EntityUtils.consumeQuietly(httpEntity); } } return responseBody; } - /** - * * 创建请求 - * - * @param uri 请求url - * @param methodName 请求的方法类型 - * @param contentType contentType类型 - * @param timeout 超时时间 - * @return HttpRequestBase 返回类型 - * @author lisc - */ - private static HttpRequestBase getRequest(String uri, String methodName, String contentType, int timeout) { - if (httpClient == null) { - initPools(); - } + /*** create http request ***/ + private static HttpRequestBase getRequest(String uri, String methodName) { HttpRequestBase 
method; - if (timeout <= 0) { - timeout = DEFAULT_TIME_OUT; - } - RequestConfig requestConfig = RequestConfig.custom().setSocketTimeout(timeout * 1000) - .setConnectTimeout(timeout * 1000).setConnectionRequestTimeout(timeout * 1000) - .setExpectContinueEnabled(false).build(); + RequestConfig requestConfig = RequestConfig.custom() + .setSocketTimeout(DEFAULT_TIME_OUT * 1000) + .setConnectTimeout(DEFAULT_TIME_OUT * 1000) + .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000) + .setExpectContinueEnabled(false) + .build(); if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) { method = new HttpPut(uri); } else if (HttpPost.METHOD_NAME.equalsIgnoreCase(methodName)) { @@ -134,62 +153,10 @@ public class HttpClientPoolUtil { } else { method = new HttpPost(uri); } - - if (contentType == null || contentType.isEmpty() || contentType.replaceAll("\\s", "").isEmpty()) { - contentType = DEFAULT_CONTENT_TYPE; - } - method.addHeader("Content-Type", contentType); - method.addHeader("Accept", contentType); + method.addHeader(HTTP.CONTENT_TYPE, DEFAULT_CONTENT_TYPE); + method.addHeader("Accept", DEFAULT_CONTENT_TYPE); method.setConfig(requestConfig); return method; } - /** - * 执行GET 请求 - * - * @param uri 网址 - * @return responseBody - */ - public static String execute(String uri) { - long startTime = System.currentTimeMillis(); - HttpEntity httpEntity = null; - HttpRequestBase method = null; - String responseBody = ""; - try { - if (httpClient == null) { - initPools(); - } - method = getRequest(uri, HttpGet.METHOD_NAME, DEFAULT_CONTENT_TYPE, 0); - HttpContext context = HttpClientContext.create(); - CloseableHttpResponse httpResponse = httpClient.execute(method, context); - httpEntity = httpResponse.getEntity(); - if (httpEntity != null) { - responseBody = EntityUtils.toString(httpEntity, "UTF-8"); -// logger.info("请求URL: " + uri + "+ 返回状态码:" + httpResponse.getStatusLine().getStatusCode()); - } - } catch (Exception e) { - if (method != null) { - method.abort(); - } - e.printStackTrace(); -// logger.error("execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" -// + (System.currentTimeMillis() - startTime)); - System.out.println("log:调用 HttpClientPoolUtil execute get request exception, url:" + uri + ", exception:" + e.toString() + ",cost time(ms):" - + (System.currentTimeMillis() - startTime)); - } finally { - if (httpEntity != null) { - try { - EntityUtils.consumeQuietly(httpEntity); - } catch (Exception e) { -// e.printStackTrace(); -// logger.error("close response exception, url:" + uri + ", exception:" + e.toString() -// + ",cost time(ms):" + (System.currentTimeMillis() - startTime)); - new Exception("close response exception, url:" + uri + ", exception:" + e.toString() - + ",cost time(ms):" + (System.currentTimeMillis() - startTime)) - .printStackTrace(); - } - } - } - return responseBody; - } } \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java index a67b4763f99164a58439df25d944cac913ab36d9..1af3e9666c238b8728ffc396a3c2f8998ab931a0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java @@ -4,14 +4,14 @@ public class OSUtils { private static final String OS = System.getProperty("os.name").toLowerCase(); public static boolean isWindows() { - return OS.indexOf("win") >= 0; + return OS.contains("win"); } public static boolean isMac() { - 
return OS.indexOf("mac") >= 0; + return OS.contains("mac"); } public static boolean isLinux() { - return OS.indexOf("nux") >= 0; + return OS.contains("nux"); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java index 251ca2af013e2b1c9cb314b776621455c91d9384..0f99ff4f661ec48f1df4bba07cf50a410084b7df 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/SqlSyntaxValidator.java @@ -14,24 +14,14 @@ *****************************************************************************/ package com.taosdata.jdbc.utils; -import com.taosdata.jdbc.TSDBConnection; - -import java.sql.Connection; - public class SqlSyntaxValidator { - private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe"}; + private static final String[] SQL = {"select", "insert", "import", "create", "use", "alter", "drop", "set", "show", "describe", "reset"}; private static final String[] updateSQL = {"insert", "import", "create", "use", "alter", "drop", "set"}; private static final String[] querySQL = {"select", "show", "describe"}; private static final String[] databaseUnspecifiedShow = {"databases", "dnodes", "mnodes", "variables"}; - private TSDBConnection tsdbConnection; - - public SqlSyntaxValidator(Connection connection) { - this.tsdbConnection = (TSDBConnection) connection; - } - public static boolean isValidForExecuteUpdate(String sql) { for (String prefix : updateSQL) { if (sql.trim().toLowerCase().startsWith(prefix)) @@ -71,29 +61,11 @@ public class SqlSyntaxValidator { public static boolean isUseSql(String sql) { return sql.trim().toLowerCase().startsWith("use"); -// || sql.trim().toLowerCase().matches("create\\s*database.*") || sql.toLowerCase().toLowerCase().matches("drop\\s*database.*"); - } - - public static boolean isShowSql(String sql) { - return sql.trim().toLowerCase().startsWith("show"); - } - - public static boolean isDescribeSql(String sql) { - return sql.trim().toLowerCase().startsWith("describe"); - } - - - public static boolean isInsertSql(String sql) { - return sql.trim().toLowerCase().startsWith("insert") || sql.trim().toLowerCase().startsWith("import"); } public static boolean isSelectSql(String sql) { return sql.trim().toLowerCase().startsWith("select"); } - public static boolean isShowDatabaseSql(String sql) { - return sql.trim().toLowerCase().matches("show\\s*databases"); - } - } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java index ee1364ce217c639cbf864f87f83ad6f5ddd6c137..a427103770cff7f51355024688454824d7263c77 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java @@ -7,9 +7,9 @@ import java.util.concurrent.atomic.AtomicLong; public class TaosInfo implements TaosInfoMBean { private static volatile TaosInfo instance; - private AtomicLong connect_open = new AtomicLong(); - private AtomicLong connect_close = new AtomicLong(); - private AtomicLong statement_count = new AtomicLong(); + private final AtomicLong connect_open = new AtomicLong(); + private final AtomicLong connect_close = new AtomicLong(); + private final AtomicLong statement_count = new AtomicLong(); static { try { diff --git 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java index 8ab610fec6541e96d8b4b997c6883dddc01f1549..9cf61903f001e84f237e25c3c10fdbb8aac28fd7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java @@ -3,14 +3,13 @@ package com.taosdata.jdbc.utils; import com.google.common.collect.Range; import com.google.common.collect.RangeSet; import com.google.common.collect.TreeRangeSet; +import com.taosdata.jdbc.enums.TimestampPrecision; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; -import java.time.LocalDate; import java.time.LocalDateTime; -import java.time.LocalTime; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.time.format.DateTimeParseException; @@ -23,46 +22,58 @@ import java.util.stream.IntStream; public class Utils { - private static Pattern ptn = Pattern.compile(".*?'"); - - private static final DateTimeFormatter formatter = new DateTimeFormatterBuilder() - .appendPattern("yyyy-MM-dd HH:mm:ss.SSS").toFormatter(); - private static final DateTimeFormatter formatter2 = new DateTimeFormatterBuilder() - .appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSS").toFormatter(); + private static final Pattern ptn = Pattern.compile(".*?'"); + private static final DateTimeFormatter milliSecFormatter = new DateTimeFormatterBuilder().appendPattern("yyyy-MM-dd HH:mm:ss.SSS").toFormatter(); + private static final DateTimeFormatter microSecFormatter = new DateTimeFormatterBuilder().appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSS").toFormatter(); + private static final DateTimeFormatter nanoSecFormatter = new DateTimeFormatterBuilder().appendPattern("yyyy-MM-dd HH:mm:ss.SSSSSSSSS").toFormatter(); public static Time parseTime(String timestampStr) throws DateTimeParseException { - LocalTime time; - try { - time = LocalTime.parse(timestampStr, formatter); - } catch (DateTimeParseException e) { - time = LocalTime.parse(timestampStr, formatter2); - } - return Time.valueOf(time); + LocalDateTime dateTime = parseLocalDateTime(timestampStr); + return dateTime != null ? Time.valueOf(dateTime.toLocalTime()) : null; } - public static Date parseDate(String timestampStr) throws DateTimeParseException { - LocalDate date; - try { - date = LocalDate.parse(timestampStr, formatter); - } catch (DateTimeParseException e) { - date = LocalDate.parse(timestampStr, formatter2); - } - return Date.valueOf(date); + public static Date parseDate(String timestampStr) { + LocalDateTime dateTime = parseLocalDateTime(timestampStr); + return dateTime != null ? Date.valueOf(dateTime.toLocalDate()) : null; } public static Timestamp parseTimestamp(String timeStampStr) { - LocalDateTime dateTime; + LocalDateTime dateTime = parseLocalDateTime(timeStampStr); + return dateTime != null ? 
Timestamp.valueOf(dateTime) : null; + } + + private static LocalDateTime parseLocalDateTime(String timeStampStr) { try { - dateTime = LocalDateTime.parse(timeStampStr, formatter); + return parseMilliSecTimestamp(timeStampStr); } catch (DateTimeParseException e) { - dateTime = LocalDateTime.parse(timeStampStr, formatter2); + try { + return parseMicroSecTimestamp(timeStampStr); + } catch (DateTimeParseException ee) { + try { + return parseNanoSecTimestamp(timeStampStr); + } catch (DateTimeParseException eee) { + eee.printStackTrace(); + } + } } - return Timestamp.valueOf(dateTime); + return null; + } + + private static LocalDateTime parseMilliSecTimestamp(String timeStampStr) throws DateTimeParseException { + return LocalDateTime.parse(timeStampStr, milliSecFormatter); + } + + private static LocalDateTime parseMicroSecTimestamp(String timeStampStr) throws DateTimeParseException { + return LocalDateTime.parse(timeStampStr, microSecFormatter); + } + + private static LocalDateTime parseNanoSecTimestamp(String timeStampStr) throws DateTimeParseException { + return LocalDateTime.parse(timeStampStr, nanoSecFormatter); } public static String escapeSingleQuota(String origin) { Matcher m = ptn.matcher(origin); - StringBuffer sb = new StringBuffer(); + StringBuilder sb = new StringBuilder(); int end = 0; while (m.find()) { end = m.end(); @@ -75,7 +86,7 @@ public class Utils { sb.append(seg); } } else { // len > 1 - sb.append(seg.substring(0, seg.length() - 2)); + sb.append(seg, 0, seg.length() - 2); char lastcSec = seg.charAt(seg.length() - 2); if (lastcSec == '\\') { sb.append("\\'"); @@ -93,17 +104,31 @@ public class Utils { } public static String getNativeSql(String rawSql, Object[] parameters) { + if (parameters == null || !rawSql.contains("?")) + return rawSql; // toLowerCase String preparedSql = rawSql.trim().toLowerCase(); - String[] clause = new String[]{"values\\s*\\(.*?\\)", "tags\\s*\\(.*?\\)", "where\\s*.*"}; + String[] clause = new String[]{"tags\\s*\\([\\s\\S]*?\\)", "where[\\s\\S]*"}; Map placeholderPositions = new HashMap<>(); RangeSet clauseRangeSet = TreeRangeSet.create(); findPlaceholderPosition(preparedSql, placeholderPositions); + // find tags and where clause's position findClauseRangeSet(preparedSql, clause, clauseRangeSet); + // find values clause's position + findValuesClauseRangeSet(preparedSql, clauseRangeSet); return transformSql(rawSql, parameters, placeholderPositions, clauseRangeSet); } + private static void findValuesClauseRangeSet(String preparedSql, RangeSet clauseRangeSet) { + Matcher matcher = Pattern.compile("(values|,)\\s*(\\([^)]*\\))").matcher(preparedSql); + while (matcher.find()) { + int start = matcher.start(2); + int end = matcher.end(2); + clauseRangeSet.add(Range.closedOpen(start, end)); + } + } + private static void findClauseRangeSet(String preparedSql, String[] regexArr, RangeSet clauseRangeSet) { clauseRangeSet.clear(); for (String regex : regexArr) { @@ -111,7 +136,7 @@ public class Utils { while (matcher.find()) { int start = matcher.start(); int end = matcher.end(); - clauseRangeSet.add(Range.closed(start, end)); + clauseRangeSet.add(Range.closedOpen(start, end)); } } } @@ -146,7 +171,7 @@ public class Utils { String paraStr; if (para != null) { if (para instanceof byte[]) { - paraStr = new String((byte[]) para, Charset.forName("UTF-8")); + paraStr = new String((byte[]) para, StandardCharsets.UTF_8); } else { paraStr = para.toString(); } @@ -167,13 +192,47 @@ public class Utils { }).collect(Collectors.joining()); } - public static String 
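// Editor's note: illustrative usage sketch, not part of this patch. Utils.getNativeSql substitutes
// the bound parameter values into a prepared statement's raw SQL; the new findValuesClauseRangeSet
// above makes every parenthesised group after VALUES (including multi-row inserts) count as a
// values clause. Table name and parameter values below are hypothetical, and the exact quoting of
// string parameters is left to transformSql.
class NativeSqlSketch {
    static String example() {
        String raw = "insert into weather values(?, ?),(?, ?)";
        Object[] params = new Object[]{1626006833639L, 23.5f, 1626006834639L, 24.0f};
        return com.taosdata.jdbc.utils.Utils.getNativeSql(raw, params);
    }
}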
formatTimestamp(Timestamp timestamp) { int nanos = timestamp.getNanos(); - if (nanos % 1000000l != 0) - return timestamp.toLocalDateTime().format(formatter2); - return timestamp.toLocalDateTime().format(formatter); + if (nanos % 1000000L != 0) + return timestamp.toLocalDateTime().format(microSecFormatter); + return timestamp.toLocalDateTime().format(milliSecFormatter); + } + + public static TimestampPrecision guessTimestampPrecision(String value) { + if (isMilliSecFormat(value)) + return TimestampPrecision.MS; + if (isMicroSecFormat(value)) + return TimestampPrecision.US; + if (isNanoSecFormat(value)) + return TimestampPrecision.NS; + return TimestampPrecision.UNKNOWN; + } + + private static boolean isMilliSecFormat(String timestampStr) { + try { + milliSecFormatter.parse(timestampStr); + } catch (DateTimeParseException e) { + return false; + } + return true; } + private static boolean isMicroSecFormat(String timestampStr) { + try { + microSecFormatter.parse(timestampStr); + } catch (DateTimeParseException e) { + return false; + } + return true; + } + private static boolean isNanoSecFormat(String timestampStr) { + try { + nanoSecFormatter.parse(timestampStr); + } catch (DateTimeParseException e) { + return false; + } + return true; + } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java deleted file mode 100644 index 73ceafa7299b256d7e83064b53bd638835a4b075..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/StatementTest.java +++ /dev/null @@ -1,108 +0,0 @@ -package com.taosdata.jdbc; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.sql.*; -import java.util.Properties; - -public class StatementTest { - static Connection connection = null; - static Statement statement = null; - static String dbName = "test"; - static String tName = "t0"; - static String host = "localhost"; - - @BeforeClass - public static void createConnection() throws SQLException { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/?user=root&password=taosdata", properties); - statement = connection.createStatement(); - statement.executeUpdate("drop database if exists " + dbName); - - } catch (ClassNotFoundException e) { - return; - } - } - - @Test - public void testCase() { - try { - ResultSet rs = statement.executeQuery("show databases"); - ResultSetMetaData metaData = rs.getMetaData(); - while (rs.next()) { - for (int i = 1; i <= metaData.getColumnCount(); i++) { - System.out.print(metaData.getColumnLabel(i) + ":" + rs.getString(i) + "\t"); - } - System.out.println(); - } - } catch (SQLException e) { - e.printStackTrace(); - } - } - - @Test - public void createTableAndQuery() throws SQLException { - long ts = System.currentTimeMillis(); - - statement.executeUpdate("create database if not exists " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." + tName + "(ts timestamp, k1 int)"); - statement.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", 1)"); - statement.execute("select * from " + dbName + "." 
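// Editor's note: illustrative sketch, not part of this patch. Utils.guessTimestampPrecision, added
// above, classifies a timestamp string by trying the millisecond, microsecond and nanosecond
// formatters in turn; the sample inputs in the comments are hypothetical.
class PrecisionGuessSketch {
    static com.taosdata.jdbc.enums.TimestampPrecision guess(String value) {
        // "2021-01-01 00:00:00.123"       -> TimestampPrecision.MS
        // "2021-01-01 00:00:00.123456"    -> TimestampPrecision.US
        // "2021-01-01 00:00:00.123456789" -> TimestampPrecision.NS
        return com.taosdata.jdbc.utils.Utils.guessTimestampPrecision(value);
    }
}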
+ tName); - ResultSet resultSet = statement.getResultSet(); - Assert.assertNotNull(resultSet); - - boolean isClosed = statement.isClosed(); - Assert.assertEquals(false, isClosed); - } - - @Test(expected = SQLException.class) - public void testUnsupport() throws SQLException { - Assert.assertNotNull(statement.unwrap(TSDBStatement.class)); - Assert.assertTrue(statement.isWrapperFor(TSDBStatement.class)); - - statement.getMaxFieldSize(); - statement.setMaxFieldSize(0); - statement.setEscapeProcessing(true); - statement.cancel(); - statement.getWarnings(); - statement.clearWarnings(); - statement.setCursorName(null); - statement.getMoreResults(); - statement.setFetchDirection(0); - statement.getFetchDirection(); - statement.getResultSetConcurrency(); - statement.getResultSetType(); - statement.getConnection(); - statement.getMoreResults(); - statement.getGeneratedKeys(); - statement.executeUpdate(null, 0); - statement.executeUpdate(null, new int[]{0}); - statement.executeUpdate(null, new String[]{"str1", "str2"}); - statement.getResultSetHoldability(); - statement.setPoolable(true); - statement.isPoolable(); - statement.closeOnCompletion(); - statement.isCloseOnCompletion(); - } - - @AfterClass - public static void close() { - try { - statement.execute("drop database if exists " + dbName); - if (statement != null) - statement.close(); - if (connection != null) - connection.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - } -} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java index dc6d0d322af5c90dc0eb9d42328c9864714838a1..7cdda572a7e6950005d47798bece6cc5c6fef7d1 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java @@ -5,7 +5,6 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; -import javax.management.OperationsException; import java.sql.*; import java.util.Properties; @@ -380,14 +379,15 @@ public class TSDBConnectionTest { conn.abort(null); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void setNetworkTimeout() throws SQLException { conn.setNetworkTimeout(null, 1000); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getNetworkTimeout() throws SQLException { - conn.getNetworkTimeout(); + int networkTimeout = conn.getNetworkTimeout(); + Assert.assertEquals(0, networkTimeout); } @Test diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java index b5f8114bffe9a4490f3189a54cf724354e9394a5..88ff5d3a811e17aaabbeb0a451fbff010307ab6d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java @@ -1,16 +1,16 @@ package com.taosdata.jdbc; import org.junit.Test; -import static org.junit.Assert.*; +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; import java.sql.SQLException; import java.sql.SQLWarning; import java.util.ArrayList; import java.util.List; -import java.lang.management.ManagementFactory; -import java.lang.management.RuntimeMXBean; -import java.lang.management.ThreadMXBean; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; public class TSDBJNIConnectorTest { @@ 
-114,6 +114,10 @@ public class TSDBJNIConnectorTest { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_RESULT_SET_NULL); } // close statement + connector.executeQuery("use d"); + String[] lines = new String[] {"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"}; + connector.insertLines(lines); // close connection connector.closeConnection(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java index 83caf1bebb72ec813683b083388e51b9dab2afc9..dc41d85cf38c5fbedb6e5f5c26d593c8c9d5c4d7 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java @@ -1,6 +1,5 @@ package com.taosdata.jdbc; -import com.taosdata.jdbc.rs.RestfulParameterMetaData; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 3f8bef4a7ea00915925b2924b08ad8c453420416..6bddd3f42835e6706ef922f2175d6e9a36dcf509 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -2,10 +2,8 @@ package com.taosdata.jdbc; import org.junit.*; -import java.io.IOException; import java.math.BigDecimal; import java.sql.*; -import java.time.LocalTime; import java.util.ArrayList; import java.util.Random; @@ -15,6 +13,7 @@ public class TSDBPreparedStatementTest { private static Connection conn; private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; private static final String sql_select = "select * from t1 where ts >= ? and ts < ? 
and f1 >= ?"; + private static final String dbname = "test_pstmt_jni"; private PreparedStatement pstmt_insert; private PreparedStatement pstmt_select; @@ -26,7 +25,7 @@ public class TSDBPreparedStatementTest { long ts = System.currentTimeMillis(); pstmt_insert.setTimestamp(1, new Timestamp(ts)); pstmt_insert.setInt(2, 2); - pstmt_insert.setLong(3, 3l); + pstmt_insert.setLong(3, 3L); pstmt_insert.setFloat(4, 3.14f); pstmt_insert.setDouble(5, 3.1415); pstmt_insert.setShort(6, (short) 6); @@ -53,8 +52,8 @@ public class TSDBPreparedStatementTest { Assert.assertEquals(ts, rs.getTimestamp(1).getTime()); Assert.assertEquals(2, rs.getInt(2)); Assert.assertEquals(2, rs.getInt("f1")); - Assert.assertEquals(3l, rs.getLong(3)); - Assert.assertEquals(3l, rs.getLong("f2")); + Assert.assertEquals(3L, rs.getLong(3)); + Assert.assertEquals(3L, rs.getLong("f2")); Assert.assertEquals(3.14f, rs.getFloat(4), 0.0); Assert.assertEquals(3.14f, rs.getFloat("f3"), 0.0); Assert.assertEquals(3.1415, rs.getDouble(5), 0.0); @@ -299,101 +298,101 @@ public class TSDBPreparedStatementTest { } @Test - public void executeTest() throws SQLException { - Statement stmt = conn.createStatement(); + public void executeTest() throws SQLException { + Statement stmt = conn.createStatement(); - int numOfRows = 1000; + int numOfRows = 1000; - for (int loop = 0; loop < 10; loop++){ + for (int loop = 0; loop < 10; loop++) { stmt.execute("drop table if exists weather_test"); - stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))"); + stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))"); TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? 
values(?, ?, ?, ?, ?, ?, ?, ?)"); Random r = new Random(); s.setTableName("weather_test"); - - ArrayList ts = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { ts.add(System.currentTimeMillis() + i); - } + } s.setTimestamp(0, ts); int random = 10 + r.nextInt(5); - ArrayList s2 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { s2.add(null); - }else{ + } else { s2.add("分支" + i % 4); } - } - s.setNString(1, s2, 4); + } + s.setNString(1, s2, 4); random = 10 + r.nextInt(5); - ArrayList s3 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList s3 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { s3.add(null); - }else{ + } else { s3.add(r.nextFloat()); } - } + } s.setFloat(2, s3); random = 10 + r.nextInt(5); - ArrayList s4 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList s4 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { s4.add(null); - }else{ + } else { s4.add(r.nextDouble()); } } s.setDouble(3, s4); random = 10 + r.nextInt(5); - ArrayList ts2 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList ts2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { ts2.add(null); - }else{ + } else { ts2.add(System.currentTimeMillis() + i); } } s.setTimestamp(4, ts2); random = 10 + r.nextInt(5); - ArrayList vals = new ArrayList<>(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList vals = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { vals.add(null); - }else{ + } else { vals.add(r.nextInt()); - } + } } s.setInt(5, vals); random = 10 + r.nextInt(5); ArrayList sb = new ArrayList<>(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { sb.add(null); - }else{ - sb.add(i % 2 == 0 ? 
true : false); + } else { + sb.add(i % 2 == 0); } } - s.setBoolean(6, sb); + s.setBoolean(6, sb); random = 10 + r.nextInt(5); - ArrayList s5 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList s5 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { s5.add(null); - }else{ + } else { s5.add("test" + i % 10); } } - s.setString(7, s5, 10); + s.setString(7, s5, 10); s.columnDataAddBatch(); s.columnDataExecuteBatch(); @@ -403,54 +402,54 @@ public class TSDBPreparedStatementTest { PreparedStatement statement = conn.prepareStatement(sql); ResultSet rs = statement.executeQuery(); int rows = 0; - while(rs.next()) { + while (rs.next()) { rows++; } - Assert.assertEquals(numOfRows, rows); + Assert.assertEquals(numOfRows, rows); } } @Test - public void bindDataSelectColumnTest() throws SQLException { - Statement stmt = conn.createStatement(); + public void bindDataSelectColumnTest() throws SQLException { + Statement stmt = conn.createStatement(); - int numOfRows = 1000; + int numOfRows = 1000; - for (int loop = 0; loop < 10; loop++){ + for (int loop = 0; loop < 10; loop++) { stmt.execute("drop table if exists weather_test"); - stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))"); + stmt.execute("create table weather_test(ts timestamp, f1 nchar(4), f2 float, f3 double, f4 timestamp, f5 int, f6 bool, f7 binary(10))"); TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? (ts, f1, f7) values(?, ?, ?)"); Random r = new Random(); s.setTableName("weather_test"); - - ArrayList ts = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { ts.add(System.currentTimeMillis() + i); - } + } s.setTimestamp(0, ts); int random = 10 + r.nextInt(5); - ArrayList s2 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { s2.add(null); - }else{ + } else { s2.add("分支" + i % 4); } - } - s.setNString(1, s2, 4); + } + s.setNString(1, s2, 4); random = 10 + r.nextInt(5); - ArrayList s3 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - if(i % random == 0) { + ArrayList s3 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + if (i % random == 0) { s3.add(null); - }else{ + } else { s3.add("test" + i % 10); } } - s.setString(2, s3, 10); + s.setString(2, s3, 10); s.columnDataAddBatch(); s.columnDataExecuteBatch(); @@ -460,30 +459,30 @@ public class TSDBPreparedStatementTest { PreparedStatement statement = conn.prepareStatement(sql); ResultSet rs = statement.executeQuery(); int rows = 0; - while(rs.next()) { + while (rs.next()) { rows++; } - Assert.assertEquals(numOfRows, rows); + Assert.assertEquals(numOfRows, rows); } } @Test - public void bindDataWithSingleTagTest() throws SQLException { - Statement stmt = conn.createStatement(); + public void bindDataWithSingleTagTest() throws SQLException { + Statement stmt = conn.createStatement(); - String types[] = new String[] {"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"}; + String[] types = new String[]{"tinyint", "smallint", "int", "bigint", "bool", "float", "double", "binary(10)", "nchar(10)"}; for (String type : types) { stmt.execute("drop table if exists weather_test"); stmt.execute("create table weather_test(ts timestamp, f1 
nchar(10), f2 binary(10)) tags (t " + type + ")"); int numOfRows = 1; - + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?) values(?, ?, ?)"); Random r = new Random(); s.setTableName("w1"); - - switch(type) { + + switch (type) { case "tinyint": case "smallint": case "int": @@ -508,37 +507,37 @@ public class TSDBPreparedStatementTest { default: break; } - - - ArrayList ts = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { + + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { ts.add(System.currentTimeMillis() + i); - } + } s.setTimestamp(0, ts); - + int random = 10 + r.nextInt(5); - ArrayList s2 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - s2.add("分支" + i % 4); - } + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s2.add("分支" + i % 4); + } s.setNString(1, s2, 10); - + random = 10 + r.nextInt(5); - ArrayList s3 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { - s3.add("test" + i % 4); - } + ArrayList s3 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { + s3.add("test" + i % 4); + } s.setString(2, s3, 10); - + s.columnDataAddBatch(); s.columnDataExecuteBatch(); s.columnDataCloseBatch(); - + String sql = "select * from weather_test"; PreparedStatement statement = conn.prepareStatement(sql); ResultSet rs = statement.executeQuery(); int rows = 0; - while(rs.next()) { + while (rs.next()) { rows++; } Assert.assertEquals(numOfRows, rows); @@ -547,32 +546,32 @@ public class TSDBPreparedStatementTest { @Test - public void bindDataWithMultipleTagsTest() throws SQLException { - Statement stmt = conn.createStatement(); - + public void bindDataWithMultipleTagsTest() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("drop table if exists weather_test"); stmt.execute("create table weather_test(ts timestamp, f1 nchar(10), f2 binary(10)) tags (t1 int, t2 binary(10))"); int numOfRows = 1; - TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) (ts, f2) values(?, ?)"); - s.setTableName("w2"); + TSDBPreparedStatement s = (TSDBPreparedStatement) conn.prepareStatement("insert into ? using weather_test tags(?,?) 
(ts, f2) values(?, ?)"); + s.setTableName("w2"); s.setTagInt(0, 1); s.setTagString(1, "test"); - - - ArrayList ts = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { + + + ArrayList ts = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { ts.add(System.currentTimeMillis() + i); - } + } s.setTimestamp(0, ts); - - ArrayList s2 = new ArrayList(); - for(int i = 0; i < numOfRows; i++) { + + ArrayList s2 = new ArrayList<>(); + for (int i = 0; i < numOfRows; i++) { s2.add("test" + i % 4); } s.setString(1, s2, 10); - + s.columnDataAddBatch(); s.columnDataExecuteBatch(); s.columnDataCloseBatch(); @@ -581,21 +580,20 @@ public class TSDBPreparedStatementTest { PreparedStatement statement = conn.prepareStatement(sql); ResultSet rs = statement.executeQuery(); int rows = 0; - while(rs.next()) { + while (rs.next()) { rows++; } Assert.assertEquals(numOfRows, rows); - } - - @Test + + @Test(expected = SQLException.class) public void createTwoSameDbTest() throws SQLException { - Statement stmt = conn.createStatement(); - + // when + Statement stmt = conn.createStatement(); + stmt.execute("create database dbtest"); stmt.execute("create database dbtest"); - Assert.assertThrows(SQLException.class, () -> stmt.execute("create database dbtest")); } - + @Test public void setBoolean() throws SQLException { // given @@ -789,7 +787,7 @@ public class TSDBPreparedStatementTest { public void setBigDecimal() throws SQLException { // given long ts = System.currentTimeMillis(); - BigDecimal bigDecimal = new BigDecimal(3.14444); + BigDecimal bigDecimal = new BigDecimal("3.14444"); // when pstmt_insert.setTimestamp(1, new Timestamp(ts)); @@ -842,13 +840,13 @@ public class TSDBPreparedStatementTest { } @Test - public void setBytes() throws SQLException, IOException { + public void setBytes() throws SQLException { // given long ts = System.currentTimeMillis(); byte[] f8 = "{\"name\": \"john\", \"age\": 10, \"address\": \"192.168.1.100\"}".getBytes(); // when - pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setTimestamp(1, new Timestamp(ts)); pstmt_insert.setBytes(9, f8); int result = pstmt_insert.executeUpdate(); @@ -1000,7 +998,7 @@ public class TSDBPreparedStatementTest { long ts = System.currentTimeMillis(); pstmt_insert.setTimestamp(1, new Timestamp(ts)); pstmt_insert.setInt(2, 2); - pstmt_insert.setLong(3, 3l); + pstmt_insert.setLong(3, 3L); pstmt_insert.setFloat(4, 3.14f); pstmt_insert.setDouble(5, 3.1415); pstmt_insert.setShort(6, (short) 6); @@ -1097,9 +1095,9 @@ public class TSDBPreparedStatementTest { try { conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); try (Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists test_pstmt_jni"); - stmt.execute("create database if not exists test_pstmt_jni"); - stmt.execute("use test_pstmt_jni"); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); } } catch (SQLException e) { e.printStackTrace(); @@ -1109,6 +1107,9 @@ public class TSDBPreparedStatementTest { @AfterClass public static void afterClass() { try { + Statement statement = conn.createStatement(); + statement.execute("drop database if exists " + dbname); + statement.close(); if (conn != null) conn.close(); } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java index 
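// Editor's note: illustrative sketch, not part of this patch. It condenses the column-binding flow
// exercised by the TSDBPreparedStatement tests above: bind one list per column, then flush with
// columnDataAddBatch / columnDataExecuteBatch / columnDataCloseBatch. The table name and schema
// (ts timestamp, f float) are hypothetical and must already exist.
class ColumnBindSketch {
    static void bindAndInsert(java.sql.Connection conn) throws java.sql.SQLException {
        com.taosdata.jdbc.TSDBPreparedStatement s =
                (com.taosdata.jdbc.TSDBPreparedStatement) conn.prepareStatement("insert into ? values(?, ?)");
        s.setTableName("weather_test");
        java.util.ArrayList<Long> ts = new java.util.ArrayList<>();
        java.util.ArrayList<Float> temperature = new java.util.ArrayList<>();
        for (int i = 0; i < 10; i++) {
            ts.add(System.currentTimeMillis() + i);
            temperature.add(23.5f + i);
        }
        s.setTimestamp(0, ts);          // column 0: timestamps
        s.setFloat(1, temperature);     // column 1: float values
        s.columnDataAddBatch();
        s.columnDataExecuteBatch();
        s.columnDataCloseBatch();
    }
}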
ec54a44b7c889236676b65fea9fdf689b7207df9..f72cbbec8c1b4c0acad1c83ffcbcb35c1fb8ea7b 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java @@ -95,13 +95,13 @@ public class TSDBResultSetTest { @Test public void getBigDecimal() throws SQLException { BigDecimal f1 = rs.getBigDecimal("f1"); - Assert.assertEquals(1609430400000l, f1.longValue()); + Assert.assertEquals(1609430400000L, f1.longValue()); BigDecimal f2 = rs.getBigDecimal("f2"); Assert.assertEquals(1, f2.intValue()); BigDecimal f3 = rs.getBigDecimal("f3"); - Assert.assertEquals(100l, f3.longValue()); + Assert.assertEquals(100L, f3.longValue()); BigDecimal f4 = rs.getBigDecimal("f4"); Assert.assertEquals(3.1415f, f4.floatValue(), 0.00000f); @@ -125,13 +125,13 @@ public class TSDBResultSetTest { Assert.assertEquals(1, Ints.fromByteArray(f2)); byte[] f3 = rs.getBytes("f3"); - Assert.assertEquals(100l, Longs.fromByteArray(f3)); + Assert.assertEquals(100L, Longs.fromByteArray(f3)); byte[] f4 = rs.getBytes("f4"); - Assert.assertEquals(3.1415f, Float.valueOf(new String(f4)), 0.000000f); + Assert.assertEquals(3.1415f, Float.parseFloat(new String(f4)), 0.000000f); byte[] f5 = rs.getBytes("f5"); - Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f); + Assert.assertEquals(3.1415926, Double.parseDouble(new String(f5)), 0.000000f); byte[] f6 = rs.getBytes("f6"); Assert.assertTrue(Arrays.equals("abc".getBytes(), f6)); @@ -223,7 +223,7 @@ public class TSDBResultSetTest { Object f3 = rs.getObject("f3"); Assert.assertEquals(Long.class, f3.getClass()); - Assert.assertEquals(100l, f3); + Assert.assertEquals(100L, f3); Object f4 = rs.getObject("f4"); Assert.assertEquals(Float.class, f4.getClass()); @@ -421,7 +421,7 @@ public class TSDBResultSetTest { @Test(expected = SQLFeatureNotSupportedException.class) public void updateLong() throws SQLException { - rs.updateLong(1, 1l); + rs.updateLong(1, 1L); } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java index 51535bc886aa0c3114a5a5bb74190300977a9ec9..c1dfa42511cdd45ade577415fe17c872e44f5fd8 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBStatementTest.java @@ -14,24 +14,6 @@ public class TSDBStatementTest { private static Connection conn; private static Statement stmt; - @Test - public void executeQuery() { - try { - ResultSet rs = stmt.executeQuery("show databases"); - Assert.assertNotNull(rs); - ResultSetMetaData meta = rs.getMetaData(); - while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); - } - System.out.println(); - } - rs.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - } - @Test public void executeUpdate() { final String dbName = ("test_" + UUID.randomUUID()).replace("-", "_").substring(0, 32); @@ -173,10 +155,6 @@ public class TSDBStatementTest { Assert.assertEquals(3, meta.getColumnCount()); int count = 0; while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); - } - System.out.println(); count++; } Assert.assertEquals(1, count); diff --git 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java index 19bc5f713f9b406a943fc640fd03bb0503ed2967..4f37183e719e8eb21dcbd8dd625bd8d4d19214ce 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java @@ -1,5 +1,6 @@ package com.taosdata.jdbc.cases; +import org.junit.Assert; import org.junit.Test; import java.sql.Connection; @@ -12,21 +13,19 @@ public class AppMemoryLeakTest { @Test(expected = SQLException.class) public void testCreateTooManyConnection() throws ClassNotFoundException, SQLException { Class.forName("com.taosdata.jdbc.TSDBDriver"); - int conCnt = 0; while (true) { Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata"); - System.out.println(conCnt++ + " : " + conn); + Assert.assertNotNull(conn); } } @Test(expected = Exception.class) public void testCreateTooManyStatement() throws ClassNotFoundException, SQLException { Class.forName("com.taosdata.jdbc.TSDBDriver"); - int stmtCnt = 0; Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata"); while (true) { Statement stmt = conn.createStatement(); - System.out.println(++stmtCnt + " : " + stmt); + Assert.assertNotNull(stmt); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java similarity index 98% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java index a6fb6cfda044b4e88c5bd5509c51d114507d84f7..6702de9bdbf566eb1ecaea322d0338a64ffcd40c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AuthenticationTest.java @@ -1,4 +1,4 @@ -package com.taosdata.jdbc.rs; +package com.taosdata.jdbc.cases; import org.junit.Before; import org.junit.Test; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java new file mode 100644 index 0000000000000000000000000000000000000000..2211e0fa176c67329ac13ee4374daf4667da933b --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BadLocaleSettingTest.java @@ -0,0 +1,59 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class BadLocaleSettingTest { + + private static final String host = "127.0.0.1"; + private static final String dbName = "bad_locale_test"; + private static Connection conn; + + @Test + public void canSetLocale() { + try { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + 
Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbName); + stmt.execute("create database if not exists " + dbName); + stmt.execute("use " + dbName); + stmt.execute("drop table if exists weather"); + stmt.execute("create table weather(ts timestamp, temperature float, humidity int)"); + stmt.executeUpdate("insert into weather values(1624071506435, 12.3, 4)"); + stmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + System.setProperty("sun.jnu.encoding", "ANSI_X3.4-1968"); + System.setProperty("file.encoding", "ANSI_X3.4-1968"); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java new file mode 100644 index 0000000000000000000000000000000000000000..2934b54b5bd2e7f5450a9a1a00cb5bddca37e945 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchErrorIgnoreTest.java @@ -0,0 +1,144 @@ +package com.taosdata.jdbc.cases; + +import org.junit.*; + +import java.sql.*; +import java.util.stream.IntStream; + +public class BatchErrorIgnoreTest { + + private static final String host = "127.0.0.1"; + + @Test + public void batchErrorThrowException() throws SQLException { + // given + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + + // when + try (Statement stmt = conn.createStatement()) { + IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("insert into t11 values(now, 11)"); + IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("select count(*) from test.weather"); + + stmt.executeBatch(); + } catch (BatchUpdateException e) { + int[] updateCounts = e.getUpdateCounts(); + Assert.assertEquals(5, updateCounts.length); + Assert.assertEquals(1, updateCounts[0]); + Assert.assertEquals(1, updateCounts[1]); + Assert.assertEquals(1, updateCounts[2]); + Assert.assertEquals(1, updateCounts[3]); + Assert.assertEquals(1, updateCounts[4]); + } + + } + + @Test + public void batchErrorIgnore() throws SQLException { + // given + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata&batchErrorIgnore=true"); + + // when + int[] results = null; + try (Statement stmt = conn.createStatement()) { + IntStream.range(1, 6).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("insert into t11 values(now, 11)"); + IntStream.range(6, 11).mapToObj(i -> "insert into test.t" + i + " values(now, " + i + "),(now + 1s, " + (10 * i) + ")").forEach(sql -> { + try { + stmt.addBatch(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.addBatch("select count(*) from test.weather"); + + results = stmt.executeBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + + 
// then + assert results != null; + Assert.assertEquals(12, results.length); + Assert.assertEquals(1, results[0]); + Assert.assertEquals(1, results[1]); + Assert.assertEquals(1, results[2]); + Assert.assertEquals(1, results[3]); + Assert.assertEquals(1, results[4]); + Assert.assertEquals(Statement.EXECUTE_FAILED, results[5]); + Assert.assertEquals(2, results[6]); + Assert.assertEquals(2, results[7]); + Assert.assertEquals(2, results[8]); + Assert.assertEquals(2, results[9]); + Assert.assertEquals(2, results[10]); + Assert.assertEquals(Statement.SUCCESS_NO_INFO, results[11]); + } + + @Before + public void before() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("use test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table weather (ts timestamp, f1 float) tags(t1 int)"); + IntStream.range(1, 11).mapToObj(i -> "create table t" + i + " using weather tags(" + i + ")").forEach(sql -> { + try { + stmt.execute(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + }); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test"); + stmt.execute("create database if not exists test"); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + Connection conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test"); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java index e2541e8109681569d75a7384255df09d6166c34e..e175d6d1141e125d58f2a1e4a4f64c3d1b22bfbb 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/BatchInsertTest.java @@ -60,7 +60,6 @@ public class BatchInsertTest { final int index = i; executorService.execute(() -> { try { - long startTime = System.currentTimeMillis(); Statement statement = connection.createStatement(); // get statement StringBuilder sb = new StringBuilder(); sb.append("INSERT INTO " + tablePrefix + index + " VALUES"); @@ -73,8 +72,6 @@ public class BatchInsertTest { } statement.addBatch(sb.toString()); statement.executeBatch(); - long endTime = System.currentTimeMillis(); - System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds"); connection.commit(); statement.close(); } catch (Exception e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java index 7f2d367dce4e5691603e23db8a14a4f857bb7b88..fcb6ab7aaff2406acf59262a9cdef90b628b8934 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java +++ 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java @@ -1,6 +1,7 @@ package com.taosdata.jdbc.cases; import com.taosdata.jdbc.TSDBDriver; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -9,13 +10,13 @@ import java.util.Properties; public class ConnectMultiTaosdByRestfulWithDifferentTokenTest { - private static String host1 = "192.168.17.156"; - private static String user1 = "root"; - private static String password1 = "tqueue"; + private static final String host1 = "192.168.17.156"; + private static final String user1 = "root"; + private static final String password1 = "tqueue"; private Connection conn1; - private static String host2 = "192.168.17.82"; - private static String user2 = "root"; - private static String password2 = "taosdata"; + private static final String host2 = "192.168.17.82"; + private static final String user2 = "root"; + private static final String password2 = "taosdata"; private Connection conn2; @Test @@ -30,10 +31,8 @@ public class ConnectMultiTaosdByRestfulWithDifferentTokenTest { try (Statement stmt = connection.createStatement()) { ResultSet rs = stmt.executeQuery("select server_status()"); ResultSetMetaData meta = rs.getMetaData(); + Assert.assertNotNull(meta); while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - System.out.println(meta.getColumnLabel(i) + ": " + rs.getString(i)); - } } } catch (SQLException e) { e.printStackTrace(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java index f97e555ad1b1acc7b6dd0024d893fcc1ccd4cc53..14c76985484857a92e174955c943caa21bdd2e72 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DatetimeBefore1970Test.java @@ -1,64 +1,76 @@ package com.taosdata.jdbc.cases; import com.taosdata.jdbc.utils.TimestampUtil; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.*; import java.sql.*; public class DatetimeBefore1970Test { - private static Connection conn; + private static final String host = "127.0.0.1"; + private Connection conn; @Test public void test() { try (Statement stmt = conn.createStatement()) { + // given stmt.executeUpdate("insert into weather(ts) values('1969-12-31 23:59:59.999')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 00:00:00.000')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 08:00:00.000')"); stmt.executeUpdate("insert into weather(ts) values('1970-01-01 07:59:59.999')"); + ResultSet rs = stmt.executeQuery("select * from weather order by ts asc"); + ResultSetMetaData metaData = rs.getMetaData(); + Assert.assertEquals(2, metaData.getColumnCount()); + + // when + rs.next(); + // then + Timestamp ts = rs.getTimestamp("ts"); + Assert.assertEquals("1969-12-31 23:59:59.999", TimestampUtil.longToDatetime(ts.getTime())); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals("1970-01-01 00:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals("1970-01-01 08:00:00.000", TimestampUtil.longToDatetime(ts.getTime())); + + // when + rs.next(); + // then + ts = rs.getTimestamp("ts"); + Assert.assertEquals("1970-01-01 07:59:59.999", 
TimestampUtil.longToDatetime(ts.getTime())); - ResultSet rs = stmt.executeQuery("select * from weather"); - while (rs.next()) { - Timestamp ts = rs.getTimestamp("ts"); - System.out.println("long: " + ts.getTime() + ", string: " + TimestampUtil.longToDatetime(ts.getTime())); - } } catch (SQLException e) { e.printStackTrace(); } } - public static void main(String[] args) { - System.out.println("timestamp: " + Long.MAX_VALUE + ", string: " + TimestampUtil.longToDatetime(Long.MAX_VALUE)); - System.out.println("timestamp: " + Long.MIN_VALUE + ", string: " + TimestampUtil.longToDatetime(Long.MIN_VALUE)); - System.out.println("timestamp: " + 0 + ", string: " + TimestampUtil.longToDatetime(0)); - System.out.println("timestamp: " + -1 + ", string: " + TimestampUtil.longToDatetime(-1)); - String datetime = "1970-01-01 00:00:00.000"; - System.out.println("timestamp: " + TimestampUtil.datetimeToLong(datetime) + ", string: " + datetime); - datetime = "1969-12-31 23:59:59.999"; - System.out.println("timestamp: " + TimestampUtil.datetimeToLong(datetime) + ", string: " + datetime); - } - - @BeforeClass - public static void beforeClass() { + @Before + public void before() { try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - conn = DriverManager.getConnection("jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"); + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); Statement stmt = conn.createStatement(); stmt.execute("drop database if exists test_timestamp"); stmt.execute("create database if not exists test_timestamp keep 36500"); stmt.execute("use test_timestamp"); stmt.execute("create table weather(ts timestamp,f1 float)"); stmt.close(); - } catch (ClassNotFoundException | SQLException e) { + } catch (SQLException e) { e.printStackTrace(); } } - @AfterClass - public static void afterClass() { + @After + public void after() { try { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test_timestamp"); if (conn != null) conn.close(); } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4174Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DoubleQuoteInSqlTest.java similarity index 55% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4174Test.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DoubleQuoteInSqlTest.java index 2704d4cfa558ebdb6885c320d7ba775b36b99f09..212a751ec302ae02552d084248d34b35a7b21c59 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4174Test.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DoubleQuoteInSqlTest.java @@ -7,56 +7,64 @@ import org.junit.*; import java.sql.*; import java.util.Properties; -public class TD4174Test { - private Connection conn; +public class DoubleQuoteInSqlTest { private static final String host = "127.0.0.1"; + private static final String dbname = "td4174"; + + private Connection conn; @Test public void test() { + // given long ts = System.currentTimeMillis(); + JSONObject value = new JSONObject(); + value.put("name", "John Smith"); + value.put("age", 20); + + // when + int ret = 0; try (PreparedStatement pstmt = conn.prepareStatement("insert into weather values(" + ts + ", ?)")) { - JSONObject value = new JSONObject(); - value.put("name", "John Smith"); - value.put("age", 20); - Assert.assertEquals("{\"name\":\"John Smith\",\"age\":20}",value.toJSONString()); pstmt.setString(1, value.toJSONString()); - - int ret = 
pstmt.executeUpdate(); - Assert.assertEquals(1, ret); + ret = pstmt.executeUpdate(); } catch (SQLException e) { e.printStackTrace(); } - } - public static void main(String[] args) { - JSONObject value = new JSONObject(); - value.put("name", "John Smith"); - value.put("age", 20); - System.out.println(value.toJSONString()); + // then + Assert.assertEquals("{\"name\":\"John Smith\",\"age\":20}", value.toJSONString()); + Assert.assertEquals(1, ret); } @Before - public void before() throws SQLException { + public void before() { String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - conn = DriverManager.getConnection(url, properties); - try (Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists td4174"); - stmt.execute("create database if not exists td4174"); - stmt.execute("use td4174"); + try { + conn = DriverManager.getConnection(url, properties); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); stmt.execute("create table weather(ts timestamp, text binary(64))"); + } catch (SQLException e) { + e.printStackTrace(); } } @After - public void after() throws SQLException { - if (conn != null) + public void after() { + try { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.close(); conn.close(); - + } catch (SQLException e) { + e.printStackTrace(); + } } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java index 6c8aed1b066236371496813301f77db32fb4e0f9..a00c8f4f97aad40c05e71fa63e34a2253b706046 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java @@ -13,7 +13,7 @@ import java.util.Properties; public class DriverAutoloadTest { private Properties properties; - private String host = "127.0.0.1"; + private final String host = "127.0.0.1"; @Test public void testRestful() throws SQLException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java index d7603312a090bedb17ed125edf6da535924964d0..bc11c7f34eeb719574a35beaf186cf637df2826f 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ImportTest.java @@ -20,7 +20,6 @@ public class ImportTest { @BeforeClass public static void before() { try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); @@ -33,8 +32,6 @@ public class ImportTest { stmt.close(); ts = System.currentTimeMillis(); - } catch (ClassNotFoundException e) { - e.printStackTrace(); } catch (SQLException e) { e.printStackTrace(); } @@ -47,7 +44,6 @@ public class ImportTest { for (int i = 0; i < 50; i++) { ts++; int row = stmt.executeUpdate("import into " + dbName + "." 
+ tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")"); - System.out.println("import into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row); assertEquals(1, row); } } @@ -84,7 +80,6 @@ public class ImportTest { long t = (++ts) + a; sqlBuilder.append("(").append(t).append(",").append((100 + i)).append(",").append(i).append(") "); } - System.out.println(sqlBuilder.toString()); int rows = stmt.executeUpdate(sqlBuilder.toString()); assertEquals(50, rows); } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java index 84149775c3d2bbda6a66930159e19eb27d95a6d1..beea990456ec98c2ab51fc2086034e0b31b570b6 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java @@ -13,9 +13,9 @@ import java.util.Random; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class InsertDbwithoutUseDbTest { - private static String host = "127.0.0.1"; + private static final String host = "127.0.0.1"; private static Properties properties; - private static Random random = new Random(System.currentTimeMillis()); + private static final Random random = new Random(System.currentTimeMillis()); @Test public void case001() throws ClassNotFoundException, SQLException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java index 4b4e83719ff64578e0ca5b34cd4cfe11ea8d98f3..9f8243542f0c2cf760ca192a0d39293531a5e42c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterJniTest.java @@ -8,14 +8,14 @@ public class InsertSpecialCharacterJniTest { private static final String host = "127.0.0.1"; private static Connection conn; - private static String dbName = "spec_char_test"; - private static String tbname1 = "test"; - private static String tbname2 = "weather"; - private static String special_character_str_1 = "$asd$$fsfsf$"; - private static String special_character_str_2 = "\\\\asdfsfsf\\\\"; - private static String special_character_str_3 = "\\\\asdfsfsf\\"; - private static String special_character_str_4 = "?asd??fsf?sf?"; - private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; + private static final String dbName = "spec_char_test"; + private static final String tbname1 = "test"; + private static final String tbname2 = "weather"; + private static final String special_character_str_1 = "$asd$$fsfsf$"; + private static final String special_character_str_2 = "\\\\asdfsfsf\\\\"; + private static final String special_character_str_3 = "\\\\asdfsfsf\\"; + private static final String special_character_str_4 = "?asd??fsf?sf?"; + private static final String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; @Test public void testCase01() throws SQLException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java index 
fa6cbd22b5da0869d4a74e90de12c952fecd22c7..2e981e7f414590c8d8be46659a415cb244a949ae 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertSpecialCharacterRestfulTest.java @@ -8,14 +8,14 @@ public class InsertSpecialCharacterRestfulTest { private static final String host = "127.0.0.1"; private static Connection conn; - private static String dbName = "spec_char_test"; - private static String tbname1 = "test"; - private static String tbname2 = "weather"; - private static String special_character_str_1 = "$asd$$fsfsf$"; - private static String special_character_str_2 = "\\\\asdfsfsf\\\\"; - private static String special_character_str_3 = "\\\\asdfsfsf\\"; - private static String special_character_str_4 = "?asd??fsf?sf?"; - private static String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; + private static final String dbName = "spec_char_test"; + private static final String tbname1 = "test"; + private static final String tbname2 = "weather"; + private static final String special_character_str_1 = "$asd$$fsfsf$"; + private static final String special_character_str_2 = "\\\\asdfsfsf\\\\"; + private static final String special_character_str_3 = "\\\\asdfsfsf\\"; + private static final String special_character_str_4 = "?asd??fsf?sf?"; + private static final String special_character_str_5 = "?#sd@$f(('<(s[P)>\"){]}f?s[]{}%vaew|\"fsfs^a&d*jhg)(j))(f@~!?$"; @Test public void testCase01() throws SQLException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java index f3d79b1df1594edc4fffd626244bc742ea13ec75..6febdd84972113e1ea779cd620865b0a25595486 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InvalidResultSetPointerTest.java @@ -8,13 +8,13 @@ import java.util.Properties; public class InvalidResultSetPointerTest { - private static String host = "127.0.0.1"; + private static final String host = "127.0.0.1"; private static final String dbName = "test"; private static final String stbName = "stb"; private static final String tbName = "tb"; private static Connection connection; - private static int numOfSTb = 30000; - private static int numOfTb = 3; + private static final int numOfSTb = 30000; + private static final int numOfTb = 3; private static int numOfThreads = 100; @Test @@ -74,7 +74,7 @@ public class InvalidResultSetPointerTest { b = numOfSTb % numOfThreads; } - multiThreadingClass instance[] = new multiThreadingClass[numOfThreads]; + multiThreadingClass[] instance = new multiThreadingClass[numOfThreads]; int last = 0; for (int i = 0; i < numOfThreads; i++) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java similarity index 97% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java index f9b111bb12f189a7a42c7944237aa01ebb008d25..eb8f134227713e4c41224dc6a561916427290864 100644 --- 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionJNITest.java @@ -10,7 +10,7 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; -public class TwoTypeTimestampPercisionInJniTest { +public class MicroSecondPrecisionJNITest { private static final String host = "127.0.0.1"; private static final String ms_timestamp_db = "ms_precision_test"; @@ -41,7 +41,6 @@ public class TwoTypeTimestampPercisionInJniTest { rs.next(); Timestamp timestamp = rs.getTimestamp(1); - System.out.println(timestamp); long ts = timestamp.getTime(); Assert.assertEquals(timestamp1, ts); int nanos = timestamp.getNanos(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionRestfulTest.java similarity index 99% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionRestfulTest.java index 5c83b5a9da527a55387a8ad399e78462b6fab63c..7e9f04cd6360431a3fe6c29a2f0eb61fbdc9e7c4 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TwoTypeTimestampPercisionInRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MicroSecondPrecisionRestfulTest.java @@ -10,10 +10,9 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; -public class TwoTypeTimestampPercisionInRestfulTest { +public class MicroSecondPrecisionRestfulTest { private static final String host = "127.0.0.1"; - private static final String ms_timestamp_db = "ms_precision_test"; private static final String us_timestamp_db = "us_precision_test"; private static final long timestamp1 = System.currentTimeMillis(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatmentTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatementTest.java similarity index 83% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatmentTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatementTest.java index 5cb76cc0cb71e1cb879b696d05cc6682f93d0bdc..73da79f357b1c066943e2f39bf9f8bdc86382d7e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatmentTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/MultiThreadsWithSameStatementTest.java @@ -7,21 +7,19 @@ import org.junit.Test; import java.sql.*; import java.util.concurrent.TimeUnit; -public class MultiThreadsWithSameStatmentTest { +public class MultiThreadsWithSameStatementTest { - - private class Service { + private static class Service { public Connection conn; public Statement stmt; public Service() { try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata"); stmt = conn.createStatement(); stmt.execute("create database if not exists jdbctest"); stmt.executeUpdate("create table if not exists jdbctest.weather (ts timestamp, f1 int)"); - } catch (ClassNotFoundException | SQLException e) { + } catch (SQLException e) { e.printStackTrace(); } } @@ -48,10 +46,6 @@ public class MultiThreadsWithSameStatmentTest { ResultSet resultSet = 
service.stmt.executeQuery("select * from jdbctest.weather"); while (resultSet.next()) { ResultSetMetaData metaData = resultSet.getMetaData(); - for (int i = 1; i <= metaData.getColumnCount(); i++) { - System.out.print(metaData.getColumnLabel(i) + ": " + resultSet.getString(i)); - } - System.out.println(); } resultSet.close(); service.release(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NanoSecondTimestampJNITest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NanoSecondTimestampJNITest.java new file mode 100644 index 0000000000000000000000000000000000000000..4f2c87966ad6bb8390bab47b795e7d952725baf5 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NanoSecondTimestampJNITest.java @@ -0,0 +1,182 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.time.Instant; +import java.util.Random; + +public class NanoSecondTimestampJNITest { + + private static final String host = "127.0.0.1"; + private static final String dbname = "nano_sec_test"; + private static final Random random = new Random(System.currentTimeMillis()); + private static Connection conn; + + @Test + public void insertUsingLongValue() { + // given + long ms = System.currentTimeMillis(); + long ns = ms * 1000_000 + random.nextInt(1000_000); + + // when + int ret = 0; + try (Statement stmt = conn.createStatement()) { + ret = stmt.executeUpdate("insert into weather(ts, temperature, humidity) values(" + ns + ", 12.3, 4)"); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(1, ret); + } + + @Test + public void insertUsingStringValue() { + // given + + // when + int ret = 0; + try (Statement stmt = conn.createStatement()) { + ret = stmt.executeUpdate("insert into weather(ts, temperature, humidity) values('2021-01-01 12:00:00.123456789', 12.3, 4)"); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(1, ret); + } + + @Test + public void insertUsingTimestampValue() { + // given + long epochSec = System.currentTimeMillis() / 1000; + long nanoAdjustment = random.nextInt(1000_000_000); + Timestamp ts = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + + // when + int ret = 0; + String sql = "insert into weather(ts, temperature, humidity) values( ?, ?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, ts); + pstmt.setFloat(2, 12.34f); + pstmt.setInt(3, 55); + ret = pstmt.executeUpdate(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(1, ret); + } + + @Test + public void selectUsingLongValue() throws SQLException { + // given + long ms = System.currentTimeMillis(); + long ns = ms * 1000_000L + random.nextInt(1000_000); + + // when + ResultSet rs = null; + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into weather(ts, temperature, humidity) values(" + ns + ", 12.3, 4)"); + rs = stmt.executeQuery("select * from weather"); + rs.next(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + long actual = rs.getLong(1); + Assert.assertEquals(ms, actual); + actual = rs.getLong("ts"); + Assert.assertEquals(ms, actual); + } + + @Test + public void selectUsingStringValue() throws SQLException { + // given + String timestampStr = "2021-01-01 12:00:00.123456789"; + + // when + ResultSet rs = null; + try (Statement stmt = 
conn.createStatement()) { + stmt.executeUpdate("insert into weather(ts, temperature, humidity) values('" + timestampStr + "', 12.3, 4)"); + rs = stmt.executeQuery("select * from weather"); + rs.next(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + String actual = rs.getString(1); + Assert.assertEquals(timestampStr, actual); + actual = rs.getString("ts"); + Assert.assertEquals(timestampStr, actual); + } + + @Test + public void selectUsingTimestampValue() throws SQLException { + // given + long timeMillis = System.currentTimeMillis(); + long epochSec = timeMillis / 1000; + long nanoAdjustment = (timeMillis % 1000) * 1000_000L + random.nextInt(1000_000); + Timestamp ts = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + + // insert one row + String sql = "insert into weather(ts, temperature, humidity) values( ?, ?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, ts); + pstmt.setFloat(2, 12.34f); + pstmt.setInt(3, 55); + pstmt.executeUpdate(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // when + ResultSet rs = null; + try (Statement stmt = conn.createStatement()) { + rs = stmt.executeQuery("select * from weather"); + rs.next(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Timestamp actual = rs.getTimestamp(1); + Assert.assertEquals(ts, actual); + actual = rs.getTimestamp("ts"); + Assert.assertEquals(ts, actual); + Assert.assertEquals(timeMillis, actual.getTime()); + Assert.assertEquals(nanoAdjustment, actual.getNanos()); + } + + @Before + public void before() { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop table if exists weather"); + stmt.execute("create table weather(ts timestamp, temperature float, humidity int)"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + try { + conn = DriverManager.getConnection(url); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname + " precision 'ns'"); + stmt.execute("use " + dbname); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NanoSecondTimestampRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NanoSecondTimestampRestfulTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4271f918b9b096000cc59b730d7b70f032a0ab29 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NanoSecondTimestampRestfulTest.java @@ -0,0 +1,182 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; +import java.time.Instant; +import java.util.Random; + +public class NanoSecondTimestampRestfulTest { + + private static final String host = "127.0.0.1"; + private static final String dbname = "nano_sec_test"; + private static final Random random = new Random(System.currentTimeMillis()); + private static Connection conn; + + @Test + public void insertUsingLongValue() { + // given + long ms = System.currentTimeMillis(); + long ns = ms * 1000_000 + random.nextInt(1000_000); + + // when + int ret = 0; + try (Statement stmt = conn.createStatement()) { + ret = stmt.executeUpdate("insert into weather(ts, temperature, humidity) 
values(" + ns + ", 12.3, 4)"); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(1, ret); + } + + @Test + public void insertUsingStringValue() { + // given + + // when + int ret = 0; + try (Statement stmt = conn.createStatement()) { + ret = stmt.executeUpdate("insert into weather(ts, temperature, humidity) values('2021-01-01 12:00:00.123456789', 12.3, 4)"); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(1, ret); + } + + @Test + public void insertUsingTimestampValue() { + // given + long epochSec = System.currentTimeMillis() / 1000; + long nanoAdjustment = random.nextInt(1000_000_000); + Timestamp ts = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + + // when + int ret = 0; + String sql = "insert into weather(ts, temperature, humidity) values( ?, ?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, ts); + pstmt.setFloat(2, 12.34f); + pstmt.setInt(3, 55); + ret = pstmt.executeUpdate(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(1, ret); + } + + @Test + public void selectUsingLongValue() throws SQLException { + // given + long ms = System.currentTimeMillis(); + long ns = ms * 1000_000L + random.nextInt(1000_000); + + // when + ResultSet rs = null; + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into weather(ts, temperature, humidity) values(" + ns + ", 12.3, 4)"); + rs = stmt.executeQuery("select * from weather"); + rs.next(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + long actual = rs.getLong(1); + Assert.assertEquals(ms, actual); + actual = rs.getLong("ts"); + Assert.assertEquals(ms, actual); + } + + @Test + public void selectUsingStringValue() throws SQLException { + // given + String timestampStr = "2021-01-01 12:00:00.123456789"; + + // when + ResultSet rs = null; + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into weather(ts, temperature, humidity) values('" + timestampStr + "', 12.3, 4)"); + rs = stmt.executeQuery("select * from weather"); + rs.next(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + String actual = rs.getString(1); + Assert.assertEquals(timestampStr, actual); + actual = rs.getString("ts"); + Assert.assertEquals(timestampStr, actual); + } + + @Test + public void selectUsingTimestampValue() throws SQLException { + // given + long timeMillis = System.currentTimeMillis(); + long epochSec = timeMillis / 1000; + long nanoAdjustment = (timeMillis % 1000) * 1000_000L + random.nextInt(1000_000); + Timestamp ts = Timestamp.from(Instant.ofEpochSecond(epochSec, nanoAdjustment)); + + // insert one row + String sql = "insert into weather(ts, temperature, humidity) values( ?, ?, ?)"; + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + pstmt.setTimestamp(1, ts); + pstmt.setFloat(2, 12.34f); + pstmt.setInt(3, 55); + pstmt.executeUpdate(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // when + ResultSet rs = null; + try (Statement stmt = conn.createStatement()) { + rs = stmt.executeQuery("select * from weather"); + rs.next(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Timestamp actual = rs.getTimestamp(1); + Assert.assertEquals(ts, actual); + actual = rs.getTimestamp("ts"); + Assert.assertEquals(ts, actual); + Assert.assertEquals(timeMillis, actual.getTime()); + Assert.assertEquals(nanoAdjustment, actual.getNanos()); + } + + @Before + 
public void before() { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop table if exists weather"); + stmt.execute("create table weather(ts timestamp, temperature float, humidity int)"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @BeforeClass + public static void beforeClass() { + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + try { + conn = DriverManager.getConnection(url); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname + " precision 'ns'"); + stmt.execute("use " + dbname); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java similarity index 87% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java index 782125144c4fbe8dcc4bdfd4769e95e5119ea32f..ae0241bf31eea85083bf102c4123f7e30c2bd693 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetJNITest.java @@ -6,7 +6,7 @@ import org.junit.Test; import java.sql.*; -public class NullValueInResultSetForJdbcJniTest { +public class NullValueInResultSetJNITest { private static final String host = "127.0.0.1"; Connection conn; @@ -17,11 +17,6 @@ public class NullValueInResultSetForJdbcJniTest { ResultSet rs = stmt.executeQuery("select * from weather"); ResultSetMetaData meta = rs.getMetaData(); while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - Object value = rs.getObject(i); - System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); - } - System.out.println(); } } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java similarity index 92% rename from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java index f2ac94adc1b6e7d94e52650dcfbb5664c8f39760..7fbb30a5244a53129807cd76472674ff1cfd6ae4 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetRestfulTest.java @@ -6,7 +6,7 @@ import org.junit.Test; import java.sql.*; -public class NullValueInResultSetForJdbcRestfulTest { +public class NullValueInResultSetRestfulTest { private static final String host = "127.0.0.1"; Connection conn; @@ -19,9 +19,7 @@ public class NullValueInResultSetForJdbcRestfulTest { while (rs.next()) { for (int i = 1; i <= meta.getColumnCount(); i++) { Object value = rs.getObject(i); - System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); } - System.out.println(); } } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetTest.java similarity index 99% rename 
from src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java rename to src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetTest.java index c6fba81eb24b9b8c08fd553ca57b1e1d68bb81e0..61d767b5cf2bcd2e478de74e5f4bb8d66ad21678 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD3841Test.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetTest.java @@ -7,7 +7,7 @@ import org.junit.*; import java.sql.*; import java.util.Properties; -public class TD3841Test { +public class NullValueInResultSetTest { private static final String host = "127.0.0.1"; private static Properties properties; private static Connection conn_restful; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/PreparedStatementBatchInsertJNITest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/PreparedStatementBatchInsertJNITest.java new file mode 100644 index 0000000000000000000000000000000000000000..ed78c2561ec437e3b7c6a0ab8d3e91699c2572af --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/PreparedStatementBatchInsertJNITest.java @@ -0,0 +1,98 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.List; +import java.util.Random; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class PreparedStatementBatchInsertJNITest { + + private static final String host = "127.0.0.1"; + private static final String dbname = "td4668"; + + private final Random random = new Random(System.currentTimeMillis()); + private Connection conn; + + @Test + public void test() { + // given + long ts = System.currentTimeMillis(); + List<Object[]> rows = IntStream.range(0, 10).mapToObj(i -> { + Object[] row = new Object[6]; + final String groupId = String.format("%02d", random.nextInt(100)); + // table name (combination of "d" + groupId) + row[0] = "d" + groupId; + // tag + row[1] = groupId; + // ts + row[2] = ts + i; + // current + row[3] = random.nextFloat(); + // voltage + row[4] = Math.random() > 0.5 ? 220 : 380; + // phase + row[5] = random.nextInt(10); + return row; + }).collect(Collectors.toList()); + final String sql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) 
VALUES (?,?,?,?)"; + + // when + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + for (Object[] row : rows) { + for (int i = 0; i < row.length; i++) { + pstmt.setObject(i + 1, row[i]); + } + pstmt.addBatch(); + } + pstmt.executeBatch(); + } catch (SQLException e) { + e.printStackTrace(); + Assert.fail(); + } + + // then + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from meters"); + int count = 0; + while (rs.next()) { + count++; + } + Assert.assertEquals(10, count); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() { + try { + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table meters(ts timestamp, current float, voltage int, phase int) tags(groupId int)"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/PreparedStatementBatchInsertRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/PreparedStatementBatchInsertRestfulTest.java new file mode 100644 index 0000000000000000000000000000000000000000..90b285381a2ab57b170da327a95dde4c8991ce21 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/PreparedStatementBatchInsertRestfulTest.java @@ -0,0 +1,98 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.List; +import java.util.Random; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class PreparedStatementBatchInsertRestfulTest { + + private static final String host = "127.0.0.1"; + private static final String dbname = "td4668"; + + private final Random random = new Random(System.currentTimeMillis()); + private Connection conn; + + @Test + public void test() { + // given + long ts = System.currentTimeMillis(); + List<Object[]> rows = IntStream.range(0, 10).mapToObj(i -> { + Object[] row = new Object[6]; + final String groupId = String.format("%02d", random.nextInt(100)); + // table name (combination of "d" + groupId) + row[0] = "d" + groupId; + // tag + row[1] = groupId; + // ts + row[2] = ts + i; + // current + row[3] = random.nextFloat(); + // voltage + row[4] = Math.random() > 0.5 ? 220 : 380; + // phase + row[5] = random.nextInt(10); + return row; + }).collect(Collectors.toList()); + final String sql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) 
VALUES (?,?,?,?)"; + + // when + try (PreparedStatement pstmt = conn.prepareStatement(sql)) { + for (Object[] row : rows) { + for (int i = 0; i < row.length; i++) { + pstmt.setObject(i + 1, row[i]); + } + pstmt.addBatch(); + } + pstmt.executeBatch(); + } catch (SQLException e) { + e.printStackTrace(); + Assert.fail(); + } + + // then + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from meters"); + int count = 0; + while (rs.next()) { + count++; + } + Assert.assertEquals(10, count); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() { + try { + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table meters(ts timestamp, current float, voltage int, phase int) tags(groupId int)"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.close(); + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java index d0ba113b7a4a8f99e22eb8143905d0b086583e1d..535e56f7d7735a7cbd209fbb2a2fddd492021e15 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java @@ -21,7 +21,6 @@ public class QueryDataTest { @Before public void createDatabase() { try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); @@ -36,7 +35,7 @@ public class QueryDataTest { String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))"; statement.executeUpdate(createTableSql); - } catch (ClassNotFoundException | SQLException e) { + } catch (SQLException e) { return; } } @@ -44,7 +43,6 @@ public class QueryDataTest { @Test public void testQueryBinaryData() throws SQLException { String insertSql = "insert into " + stbName + " values(now, 'taosdata')"; - System.out.println(insertSql); statement.executeUpdate(insertSql); String querySql = "select * from " + stbName; @@ -52,7 +50,6 @@ public class QueryDataTest { while (rs.next()) { String name = rs.getString(2); - System.out.println("name = " + name); assertEquals("taosdata", name); } rs.close(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java new file mode 100644 index 0000000000000000000000000000000000000000..4eebbd08e87b2e85ce319bc0bc98bc4515bd2077 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResetQueryCacheTest.java @@ -0,0 +1,51 @@ +package com.taosdata.jdbc.cases; + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; + +import static org.junit.Assert.assertEquals; + +public class ResetQueryCacheTest { + + static Connection 
connection; + static Statement statement; + static String host = "127.0.0.1"; + + @Before + public void init() { + try { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); + statement = connection.createStatement(); + } catch (SQLException e) { + return; + } + } + + @Test + public void testResetQueryCache() throws SQLException { + String resetSql = "reset query cache"; + statement.execute(resetSql); + } + + @After + public void close() { + try { + if (statement != null) + statement.close(); + if (connection != null) + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java new file mode 100644 index 0000000000000000000000000000000000000000..be27b1350781245e3056185db4bbaa8b5105d2f0 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ResultSetMetaShouldNotBeNullRestfulTest.java @@ -0,0 +1,86 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; + +public class ResultSetMetaShouldNotBeNullRestfulTest { + + private static final String host = "127.0.0.1"; + private static final String dbname = "td4745"; + + private Connection connection; + + @Test + public void testExecuteQuery() { + // given + ResultSetMetaData metaData = null; + int columnCount = -1; + + // when + try { + Statement statement = connection.createStatement(); + metaData = statement.executeQuery("select * from weather").getMetaData(); + columnCount = metaData.getColumnCount(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertNotNull(metaData); + Assert.assertEquals(2, columnCount); + } + + @Test + public void testExecute() { + // given + ResultSetMetaData metaData = null; + int columnCount = -1; + boolean execute = false; + // when + try { + Statement statement = connection.createStatement(); + execute = statement.execute("select * from weather"); + metaData = statement.getResultSet().getMetaData(); + columnCount = metaData.getColumnCount(); + } catch (SQLException e) { + e.printStackTrace(); + } + + // then + Assert.assertEquals(true, execute); + Assert.assertNotNull(metaData); + Assert.assertEquals(2, columnCount); + } + + @Before + public void before() { + try { + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + Statement stmt = connection.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table weather (ts timestamp, temperature float)"); + stmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + Statement stmt = connection.createStatement(); + stmt.execute("drop database if exists " + dbname); + stmt.close(); + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} diff --git 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java index 38c8cbb98c48342f131f4f5f0fee885bb446e83c..0022ceaf2123ac03192f761ef068ecf5ad333e6d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/SelectTest.java @@ -19,7 +19,6 @@ public class SelectTest { @Before public void createDatabaseAndTable() { try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); @@ -31,8 +30,6 @@ public class SelectTest { stmt.execute("create database if not exists " + dbName); stmt.execute("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)"); stmt.close(); - } catch (ClassNotFoundException e) { - return; } catch (SQLException e) { e.printStackTrace(); } @@ -47,7 +44,6 @@ public class SelectTest { for (int i = 0; i < 50; i++) { ts++; int row = stmt.executeUpdate("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")"); - System.out.println("insert into " + dbName + "." + tName + " values (" + ts + ", " + (100 + i) + ", " + i + ")\t" + row); assertEquals(1, row); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java index 4575cb73a05fbbc19d6eaf2ba5be0ed27b61804c..1600fec13d1f2a56caf3905b863aa132fe1de830 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/StableTest.java @@ -16,14 +16,13 @@ import static org.junit.Assert.assertEquals; public class StableTest { private static Connection connection; - private static String dbName = "test"; - private static String stbName = "st"; - private static String host = "127.0.0.1"; + private static final String dbName = "test"; + private static final String stbName = "st"; + private static final String host = "127.0.0.1"; @BeforeClass public static void createDatabase() { try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); @@ -34,8 +33,6 @@ public class StableTest { statement.execute("create database if not exists " + dbName); statement.execute("use " + dbName); statement.close(); - } catch (ClassNotFoundException e) { - return; } catch (SQLException e) { e.printStackTrace(); } @@ -68,9 +65,6 @@ public class StableTest { String sql = "describe " + stbName; ResultSet rs = stmt.executeQuery(sql); while (rs.next()) { - for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) { - System.out.println(i + ":" + rs.getString(i)); - } num++; } rs.close(); @@ -86,9 +80,6 @@ public class StableTest { try (Statement stmt = connection.createStatement()) { ResultSet rs = stmt.executeQuery("describe t1"); while (rs.next()) { - for (int i = 1; i <= rs.getMetaData().getColumnCount(); i++) { - System.out.printf("%d: %s\n", i, rs.getString(i)); - } num++; } rs.close(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4144Test.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4144Test.java deleted file mode 100644 index 
6f29f64111c51600303ad73d517faaa7c59cfe7c..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TD4144Test.java +++ /dev/null @@ -1,105 +0,0 @@ -package com.taosdata.jdbc.cases; - -import com.taosdata.jdbc.TSDBConnection; -import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.TSDBResultSet; -import com.taosdata.jdbc.TSDBSubscribe; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.sql.DriverManager; -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Properties; -import java.util.concurrent.TimeUnit; - -public class TD4144Test { - - private static TSDBConnection connection; - private static final String host = "127.0.0.1"; - - private static final String topic = "topic-meter-current-bg-10"; - private static final String sql = "select * from meters where current > 10"; - private static final String sql2 = "select * from meters where ts >= '2020-08-15 12:20:00.000'"; - - - @Test - public void test() throws SQLException { - TSDBSubscribe subscribe = null; - TSDBResultSet res = null; - boolean hasNext = false; - - try { - subscribe = connection.subscribe(topic, sql, false); - int count = 0; - while (true) { - // 等待1秒,避免频繁调用 consume,给服务端造成压力 - TimeUnit.SECONDS.sleep(1); - if (res == null) { - // 消费数据 - res = subscribe.consume(); - hasNext = res.next(); - } - - if (res == null) { - continue; - } - ResultSetMetaData metaData = res.getMetaData(); - int number = 0; - while (hasNext) { - int columnCount = metaData.getColumnCount(); - for (int i = 1; i <= columnCount; i++) { - System.out.print(metaData.getColumnLabel(i) + ": " + res.getString(i) + "\t"); - } - System.out.println(); - count++; - number++; - hasNext = res.next(); - if (!hasNext) { - res.close(); - res = null; - System.out.println("rows: " + count); - } - if (hasNext == true && number >= 10) { - System.out.println("batch" + number); - break; - } - } - - } - - } catch (SQLException | InterruptedException throwables) { - throwables.printStackTrace(); - } finally { - if (subscribe != null) - subscribe.close(true); - } - } - - @BeforeClass - public static void beforeClass() throws SQLException { - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; - connection = (DriverManager.getConnection(url, properties)).unwrap(TSDBConnection.class); - try (Statement stmt = connection.createStatement()) { - stmt.execute("drop database if exists power"); - stmt.execute("create database if not exists power"); - stmt.execute("use power"); - stmt.execute("create table meters(ts timestamp, current float, voltage int, phase int) tags(location binary(64), groupId int)"); - stmt.execute("create table d1001 using meters tags(\"Beijing.Chaoyang\", 2)"); - stmt.execute("create table d1002 using meters tags(\"Beijing.Haidian\", 2)"); - stmt.execute("insert into d1001 values(\"2020-08-15 12:00:00.000\", 12, 220, 1),(\"2020-08-15 12:10:00.000\", 12.3, 220, 2),(\"2020-08-15 12:20:00.000\", 12.2, 220, 1)"); - stmt.execute("insert into d1002 values(\"2020-08-15 12:00:00.000\", 9.9, 220, 1),(\"2020-08-15 12:10:00.000\", 10.3, 220, 1),(\"2020-08-15 12:20:00.000\", 11.2, 220, 1)"); - } - } - - @AfterClass - public static void afterClass() throws SQLException { - if (connection != null) - 
connection.close(); - } -} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java index e9e36e20c4f83e98d6dd808da3a58ee628f418f0..7df9f7380704eefb51b26214295624995811c84f 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TaosInfoMonitorTest.java @@ -25,7 +25,7 @@ public class TaosInfoMonitorTest { return null; }).collect(Collectors.toList()); - connectionList.stream().forEach(conn -> { + connectionList.forEach(conn -> { try (Statement stmt = conn.createStatement()) { ResultSet rs = stmt.executeQuery("show databases"); while (rs.next()) { @@ -37,7 +37,7 @@ public class TaosInfoMonitorTest { } }); - connectionList.stream().forEach(conn -> { + connectionList.forEach(conn -> { try { conn.close(); TimeUnit.MILLISECONDS.sleep(100); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java new file mode 100644 index 0000000000000000000000000000000000000000..72734cb0ecdfe67cbd02306acef3ae8625f942b0 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimestampPrecisionInNanoInJniTest.java @@ -0,0 +1,570 @@ +package com.taosdata.jdbc.cases; + + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.After; +import org.junit.Test; + +import java.sql.*; +import java.util.Properties; +import java.text.Format; +import java.text.SimpleDateFormat; + +public class TimestampPrecisionInNanoInJniTest { + + private static final String host = "127.0.0.1"; + private static final String ns_timestamp_db = "ns_precision_test"; + private static final long timestamp1 = System.currentTimeMillis(); + private static final long timestamp2 = timestamp1 * 1000_000 + 123455; + private static final long timestamp3 = (timestamp1 + 10) * 1000_000 + 123456; + private static final Format format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + private static final String date1 = format.format(new Date(timestamp1)); + private static final String date4 = format.format(new Date(timestamp1 + 10L)); + private static final String date2 = date1 + "123455"; + private static final String date3 = date4 + "123456"; + + + private static Connection conn; + + @BeforeClass + public static void beforeClass() throws SQLException { + Properties properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url, properties); + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ns_timestamp_db); + stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); + stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 
127)"); + stmt.close(); + } + + @After + public void afterEach() throws SQLException { + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists " + ns_timestamp_db); + stmt.execute("create database if not exists " + ns_timestamp_db + " precision 'ns'"); + stmt.execute("create table " + ns_timestamp_db + ".weather(ts timestamp, ts2 timestamp, f1 int)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date3 + "\", \"" + date3 + "\", 128)"); + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp2 + "," + timestamp2 + ", 127)"); + stmt.close(); + } + + @AfterClass + public static void afterClass() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void checkCount(long count, ResultSet rs) throws SQLException { + if (count == 0) { + Assert.fail(); + } + rs.next(); + long test_count = rs.getLong(1); + Assert.assertEquals(count, test_count); + } + + private void checkTime(long ts, ResultSet rs) throws SQLException { + rs.next(); + int nanos = rs.getTimestamp(1).getNanos(); + Assert.assertEquals(ts % 1000_000_000l, nanos); + long test_ts = rs.getLong(1); + Assert.assertEquals(ts / 1000_000l, test_ts); + } + + @Test + public void canInsertTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date3 + "'"); + checkTime(timestamp3, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date3 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canImportTimestampAndQueryByEqualToInDateTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(\"" + date1 + "123123\", \"" + date1 + "123123\", 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + date1 + "123123'"); + checkTime(timestamp1 * 1000_000l + 123123l, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "123123'"); + checkTime(timestamp1 * 1000_000l + 123123l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canInsertTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp2 + "'"); + checkTime(timestamp2, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'"); + 
checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp2 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canImportTimestampAndQueryByEqualToInNumberTypeInBothFirstAndSecondCol() { + try (Statement stmt = conn.createStatement()) { + long timestamp4 = timestamp1 * 1000_000 + 123123; + stmt.executeUpdate("import into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(" + timestamp4 + ", " + timestamp4 + ", 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts = '" + timestamp4 + "'"); + checkTime(timestamp4, rs); + rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 = '" + timestamp4 + "'"); + checkTime(timestamp4, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectLastRowFromWeatherForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last(ts) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectLastRowFromWeatherForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select last(ts2) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectFirstRowFromWeatherForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select first(ts) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canSelectFirstRowFromWeatherForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select first(ts2) from " + ns_timestamp_db + ".weather"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = 
stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts >= '" + timestamp2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLargerThanOrEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 >= '" + timestamp2 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts < '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + 
".weather where ts2 < '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 < '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryLessThanOrEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "'"); + checkCount(2l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInDateTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + date3 + "' AND ts > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + date3 + "' AND ts2 > '" + date2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInNumberTypeForFirstCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts from " + ns_timestamp_db + ".weather where ts <= '" + timestamp3 + "' AND ts > '" + timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryBetweenAndInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + timestamp2 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <= '" + timestamp3 + "' AND ts2 > '" + 
timestamp2 + "'"); + checkTime(timestamp3, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualToInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualToInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 <> '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualInDateTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + date3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canQueryNotEqualInNumberTypeForSecondCol() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'"); + checkCount(1l, rs); + rs = stmt.executeQuery("select ts2 from " + ns_timestamp_db + ".weather where ts2 != '" + timestamp3 + "'"); + checkTime(timestamp2, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canInsertTimestampWithNowAndNsOffsetInBothFirstAndSecondCol(){ + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(now + 1000b, now - 1000b, 128)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather"); + checkCount(3l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canIntervalAndSlidingAcceptNsUnitForFirstCol(){ + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); + rs.next(); + long sum = rs.getLong(2); + Assert.assertEquals(127l, sum); + rs.next(); + sum = rs.getLong(2); + Assert.assertEquals(128l, sum); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void canIntervalAndSlidingAcceptNsUnitForSecondCol(){ + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select sum(f1) from " + ns_timestamp_db + ".weather where ts2 >= '" + date2 + "' and ts <= '" + date3 + "' interval(10000000b) sliding(10000000b)"); + rs.next(); + long sum = rs.getLong(2); + Assert.assertEquals(127l, sum); + rs.next(); + sum = rs.getLong(2); + Assert.assertEquals(128l, sum); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void testDataOutOfRangeExceptionForFirstCol() { + try (Statement stmt = conn.createStatement()) { + 
stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(123456789012345678, 1234567890123456789, 127)"); + } catch (SQLException e) { + Assert.assertEquals("TDengine ERROR (8000060b): Timestamp data out of range", e.getMessage()); + } + } + + @Test + public void testDataOutOfRangeExceptionForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values(1234567890123456789, 123456789012345678, 127)"); + } catch (SQLException e) { + Assert.assertEquals("TDengine ERROR (8000060b): Timestamp data out of range", e.getMessage()); + } + } + + @Test + public void willAutomaticallyFillToNsUnitWithZerosForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "000000'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyFillToNsUnitWithZerosForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "', '" + date1 + "', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "000000'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyDropDigitExceedNsDigitNumberForFirstCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts = '" + date1 + "999999'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Test + public void willAutomaticallyDropDigitExceedNsDigitNumberForSecondCol() { + try (Statement stmt = conn.createStatement()) { + stmt.executeUpdate("insert into " + ns_timestamp_db + ".weather(ts, ts2, f1) values('" + date1 + "999999999', '" + date1 + "999999999', 127)"); + ResultSet rs = stmt.executeQuery("select count(*) from " + ns_timestamp_db + ".weather where ts2 = '" + date1 + "999999'"); + checkCount(1l, rs); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java index fb23c0e64aa7469443d5476de47af305cfd3c09f..e4149793aca46ebe5df47aec002828441fef481d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberJniTest.java @@ -44,7 +44,7 @@ public class UnsignedNumberJniTest { Assert.assertEquals(127, rs.getByte(2)); Assert.assertEquals(32767, rs.getShort(3)); Assert.assertEquals(2147483647, rs.getInt(4)); - Assert.assertEquals(9223372036854775807l, rs.getLong(5)); + Assert.assertEquals(9223372036854775807L, rs.getLong(5)); } } catch (SQLException e) { e.printStackTrace(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java index a659a490cb557f3825deb4bb58d32941b47a62ab..3bdf5ae4f2404db5f56e27b740d4e4951e10818d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java @@ -45,7 +45,7 @@ public class UnsignedNumberRestfulTest { Assert.assertEquals(127, rs.getByte(2)); Assert.assertEquals(32767, rs.getShort(3)); Assert.assertEquals(2147483647, rs.getInt(4)); - Assert.assertEquals(9223372036854775807l, rs.getLong(5)); + Assert.assertEquals(9223372036854775807L, rs.getLong(5)); } } catch (SQLException e) { e.printStackTrace(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java index e239aa068bae0ee70d204aec4412bf29e5b36bf1..c2f732c86903dc80328f975260a4b2352e55efd9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDriverTest.java @@ -8,11 +8,6 @@ import java.sql.*; public class RestfulDriverTest { private static final String host = "127.0.0.1"; - @Test - public void connect() { - - } - @Test public void acceptsURL() throws SQLException { Driver driver = new RestfulDriver(); @@ -27,9 +22,7 @@ public class RestfulDriverTest { Driver driver = new RestfulDriver(); final String url = ""; DriverPropertyInfo[] propertyInfo = driver.getPropertyInfo(url, null); - for (DriverPropertyInfo prop : propertyInfo) { - System.out.println(prop); - } + Assert.assertNotNull(propertyInfo); } @Test diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java index c8bb69d82749e606f18d3298697ea0995029d064..b07dae8003d6e2fea073c0d240f59fb6db0c593f 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java @@ -10,132 +10,151 @@ import java.util.Random; public class RestfulJDBCTest { private static final String host = "127.0.0.1"; - private static Connection connection; - private Random random = new Random(System.currentTimeMillis()); + private final Random random = new Random(System.currentTimeMillis()); + private Connection connection; - /** - * select * from log.log - **/ @Test public void testCase001() { - try { - Statement statement = connection.createStatement(); - ResultSet resultSet = statement.executeQuery("select * from log.log"); - ResultSetMetaData metaData = resultSet.getMetaData(); - while (resultSet.next()) { - for (int i = 1; i <= metaData.getColumnCount(); i++) { - String column = metaData.getColumnLabel(i); - String value = resultSet.getString(i); - System.out.print(column + ":" + value + "\t"); - } - System.out.println(); - } - statement.close(); - } catch (SQLException e) { - e.printStackTrace(); - } + // given + String sql = "drop database if exists restful_test"; + // when + boolean execute = execute(connection, sql); + // then + Assert.assertFalse(execute); + + // given + sql = "create database if not exists restful_test"; + // when + execute = execute(connection, sql); + // then + Assert.assertFalse(execute); + + // given + sql = "use restful_test"; + // when + execute = execute(connection, sql); + // then + Assert.assertFalse(execute); } - /** - * create database 
- */ @Test public void testCase002() { - try (Statement stmt = connection.createStatement()) { - stmt.execute("drop database if exists restful_test"); - stmt.execute("create database if not exists restful_test"); - stmt.execute("use restful_test"); - } catch (SQLException e) { - e.printStackTrace(); - } + // given + String sql = "create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + // when + boolean execute = execute(connection, sql); + // then + Assert.assertFalse(execute); } - /** - * create super table - ***/ @Test - public void testCase003() { - try (Statement stmt = connection.createStatement()) { - stmt.execute("create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"); - } catch (SQLException e) { - e.printStackTrace(); + public void testCase004() { + for (int i = 1; i <= 100; i++) { + // given + String sql = "create table t" + i + " using weather tags('beijing', '" + i + "')"; + // when + boolean execute = execute(connection, sql); + // then + Assert.assertFalse(execute); } } @Test - public void testCase004() { - try (Statement stmt = connection.createStatement()) { - for (int i = 1; i <= 100; i++) { - stmt.execute("create table t" + i + " using weather tags('beijing', '" + i + "')"); + public void testCase005() { + int rows = 0; + for (int i = 0; i < 10; i++) { + for (int j = 1; j <= 100; j++) { + + // given + long currentTimeMillis = System.currentTimeMillis(); + String sql = "insert into t" + j + " values(" + currentTimeMillis + "," + (random.nextFloat() * 50) + "," + random.nextInt(100) + ")"; + // when + int affectRows = executeUpdate(connection, sql); + // then + Assert.assertEquals(1, affectRows); + + rows += affectRows; } - } catch (SQLException e) { - e.printStackTrace(); } + Assert.assertEquals(1000, rows); } + @Test + public void testCase006() throws SQLException { + // given + String sql = "select * from weather"; + // when + ResultSet rs = executeQuery(connection, sql); + ResultSetMetaData meta = rs.getMetaData(); + + // then + Assert.assertEquals(5, meta.getColumnCount()); + + while (rs.next()) { + Assert.assertNotNull(rs.getTimestamp("ts")); + Assert.assertNotNull(rs.getFloat("temperature")); + Assert.assertNotNull(rs.getInt("humidity")); + Assert.assertNotNull(rs.getString("location")); + } + } @Test - public void testCase005() { + public void testCase007() { + // given + String sql = "drop database restful_test"; + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); + } + + private int executeUpdate(Connection connection, String sql) { try (Statement stmt = connection.createStatement()) { - int rows = 0; - for (int i = 0; i < 10; i++) { - for (int j = 1; j <= 100; j++) { - long currentTimeMillis = System.currentTimeMillis(); - int affectRows = stmt.executeUpdate("insert into t" + j + " values(" + currentTimeMillis + "," + (random.nextFloat() * 50) + "," + random.nextInt(100) + ")"); - Assert.assertEquals(1, affectRows); - rows += affectRows; - } - } - Assert.assertEquals(1000, rows); + return stmt.executeUpdate(sql); } catch (SQLException e) { e.printStackTrace(); } + return 0; } - @Test - public void testCase006() { + private boolean execute(Connection connection, String sql) { try (Statement stmt = connection.createStatement()) { - ResultSet rs = stmt.executeQuery("select * from weather"); - while (rs.next()) { - System.out.print("ts: " + rs.getTimestamp("ts")); - System.out.print(", temperature: " + 
rs.getString("temperature")); - System.out.print(", humidity: " + rs.getString("humidity")); - System.out.println(", location: " + rs.getString("location")); - } + return stmt.execute(sql); } catch (SQLException e) { e.printStackTrace(); } + return false; } - @Test - public void testCase007() { - try (Statement stmt = connection.createStatement()) { - ResultSet rs = stmt.executeQuery("select * from weather"); - ResultSetMetaData meta = rs.getMetaData(); - while (rs.next()) { - int columnCount = meta.getColumnCount(); - for (int i = 1; i <= columnCount; i++) { - String columnLabel = meta.getColumnLabel(i); - String value = rs.getString(i); - System.out.print(columnLabel + ": " + value + "\t"); - } - System.out.println(); - } + + private ResultSet executeQuery(Connection connection, String sql) { + try (Statement statement = connection.createStatement()) { + return statement.executeQuery(sql); } catch (SQLException e) { e.printStackTrace(); } + return null; } - @BeforeClass - public static void before() throws ClassNotFoundException, SQLException { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); - connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); + @Before + public void before() { + try { + connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); + } catch (SQLException e) { + e.printStackTrace(); + } } - @AfterClass - public static void after() throws SQLException { - if (connection != null) - connection.close(); + @After + public void after() { + try { + if (connection != null) + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java index ee457ff4127ccf3fe88cf277d581a3dcb3475df9..4760a723e4b4e662326987290c2c630803f8f470 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java @@ -15,6 +15,8 @@ public class RestfulPreparedStatementTest { private static PreparedStatement pstmt_insert; private static final String sql_select = "select * from t1 where ts > ? and ts <= ? 
and f1 >= ?"; private static PreparedStatement pstmt_select; + private static final String sql_without_parameters = "select count(*) from t1"; + private static PreparedStatement pstmt_without_parameters; @Test public void executeQuery() throws SQLException { @@ -27,12 +29,9 @@ public class RestfulPreparedStatementTest { ResultSet rs = pstmt_select.executeQuery(); Assert.assertNotNull(rs); ResultSetMetaData meta = rs.getMetaData(); - while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); - } - System.out.println(); - } + int columnCount = meta.getColumnCount(); + Assert.assertEquals(10, columnCount); + Assert.assertNotNull(rs); } @Test @@ -240,6 +239,7 @@ public class RestfulPreparedStatementTest { @Test public void clearParameters() throws SQLException { pstmt_insert.clearParameters(); + pstmt_without_parameters.clearParameters(); } @Test @@ -373,18 +373,20 @@ public class RestfulPreparedStatementTest { @BeforeClass public static void beforeClass() { try { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); - try (Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists test_pstmt"); - stmt.execute("create database if not exists test_pstmt"); - stmt.execute("use test_pstmt"); - stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))"); - stmt.execute("create table t1 using weather tags('beijing')"); - } + + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test_pstmt"); + stmt.execute("create database if not exists test_pstmt"); + stmt.execute("use test_pstmt"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))"); + stmt.execute("create table t1 using weather tags('beijing')"); + stmt.close(); + pstmt_insert = conn.prepareStatement(sql_insert); pstmt_select = conn.prepareStatement(sql_select); - } catch (ClassNotFoundException | SQLException e) { + pstmt_without_parameters = conn.prepareStatement(sql_without_parameters); + } catch (SQLException e) { e.printStackTrace(); } } @@ -396,6 +398,8 @@ public class RestfulPreparedStatementTest { pstmt_insert.close(); if (pstmt_select != null) pstmt_select.close(); + if (pstmt_without_parameters != null) + pstmt_without_parameters.close(); if (conn != null) conn.close(); } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index 81e762c5ca9646875f21acd89f55bd939440cfd4..21a91669b270df4dc2e8f7b4885fb9e8eedbfdf7 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -95,13 +95,13 @@ public class RestfulResultSetTest { public void getBigDecimal() throws SQLException { BigDecimal f1 = rs.getBigDecimal("f1"); long actual = (f1 == null) ? 
0 : f1.longValue(); - Assert.assertEquals(1609430400000l, actual); + Assert.assertEquals(1609430400000L, actual); BigDecimal f2 = rs.getBigDecimal("f2"); Assert.assertEquals(1, f2.intValue()); BigDecimal f3 = rs.getBigDecimal("f3"); - Assert.assertEquals(100l, f3.longValue()); + Assert.assertEquals(100L, f3.longValue()); BigDecimal f4 = rs.getBigDecimal("f4"); Assert.assertEquals(3.1415f, f4.floatValue(), 0.00000f); @@ -125,13 +125,13 @@ public class RestfulResultSetTest { Assert.assertEquals(1, Ints.fromByteArray(f2)); byte[] f3 = rs.getBytes("f3"); - Assert.assertEquals(100l, Longs.fromByteArray(f3)); + Assert.assertEquals(100L, Longs.fromByteArray(f3)); byte[] f4 = rs.getBytes("f4"); - Assert.assertEquals(3.1415f, Float.valueOf(new String(f4)), 0.000000f); + Assert.assertEquals(3.1415f, Float.parseFloat(new String(f4)), 0.000000f); byte[] f5 = rs.getBytes("f5"); - Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f); + Assert.assertEquals(3.1415926, Double.parseDouble(new String(f5)), 0.000000f); byte[] f6 = rs.getBytes("f6"); Assert.assertEquals("abc", new String(f6)); @@ -222,7 +222,7 @@ public class RestfulResultSetTest { Object f3 = rs.getObject("f3"); Assert.assertEquals(Long.class, f3.getClass()); - Assert.assertEquals(100l, f3); + Assert.assertEquals(100L, f3); Object f4 = rs.getObject("f4"); Assert.assertEquals(Float.class, f4.getClass()); @@ -434,7 +434,7 @@ public class RestfulResultSetTest { @Test(expected = SQLFeatureNotSupportedException.class) public void updateLong() throws SQLException { - rs.updateLong(1, 1l); + rs.updateLong(1, 1L); } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java index 1be32b502d3f8f7c1b94cd1a8940073520e11b12..a7b3ceb9d3bb243a2a053d5289afe39d3c870d79 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java @@ -12,6 +12,7 @@ import java.util.UUID; public class RestfulStatementTest { private static final String host = "127.0.0.1"; + private static Connection conn; private static Statement stmt; @@ -21,11 +22,11 @@ public class RestfulStatementTest { ResultSet rs = stmt.executeQuery("show databases"); Assert.assertNotNull(rs); ResultSetMetaData meta = rs.getMetaData(); + int columnCount = meta.getColumnCount(); + Assert.assertTrue(columnCount > 1); while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); - } - System.out.println(); + Assert.assertEquals("name", meta.getColumnLabel(1)); + Assert.assertNotNull(rs.getString("name")); } rs.close(); } catch (SQLException e) { @@ -174,10 +175,10 @@ public class RestfulStatementTest { Assert.assertEquals(3, meta.getColumnCount()); int count = 0; while (rs.next()) { - for (int i = 1; i <= meta.getColumnCount(); i++) { - System.out.print(meta.getColumnLabel(i) + ": " + rs.getString(i) + "\t"); - } - System.out.println(); + Assert.assertEquals("ts", meta.getColumnLabel(1)); + Assert.assertNotNull(rs.getTimestamp(1)); + Assert.assertEquals("temperature", meta.getColumnLabel(2)); + Assert.assertEquals(22.33, rs.getFloat(2), 0.001f); count++; } Assert.assertEquals(1, count); @@ -388,15 +389,12 @@ public class RestfulStatementTest { @BeforeClass public static void beforeClass() { try { - 
Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", properties); stmt = conn.createStatement(); - } catch (ClassNotFoundException e) { - e.printStackTrace(); } catch (SQLException e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java index 4ad9826384a93e221b1181b72fa576bf72ebaff4..6b88de258dd4addda06cfb6e971b9d4dd267b7b4 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java @@ -1,342 +1,586 @@ package com.taosdata.jdbc.rs; -import com.taosdata.jdbc.utils.SQLExecutor; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.FixMethodOrder; -import org.junit.Test; +import org.junit.*; import org.junit.runners.MethodSorters; import java.sql.*; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class SQLTest { + private static final String host = "127.0.0.1"; private static Connection connection; @Test public void testCase001() { + // given String sql = "create database if not exists restful_test"; - SQLExecutor.execute(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase002() { + // given String sql = "use restful_test"; - SQLExecutor.execute(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase003() { + // given String sql = "show databases"; - SQLExecutor.executeWithResult(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertTrue(execute); } @Test public void testCase004() { + // given String sql = "show tables"; - SQLExecutor.executeWithResult(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertTrue(execute); } @Test public void testCase005() { + // given String sql = "show stables"; - SQLExecutor.executeWithResult(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertTrue(execute); } @Test public void testCase006() { + // given String sql = "show dnodes"; - SQLExecutor.executeWithResult(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertTrue(execute); } @Test public void testCase007() { + // given String sql = "show vgroups"; - SQLExecutor.executeWithResult(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertTrue(execute); } @Test public void testCase008() { + // given String sql = "drop table if exists restful_test.weather"; - SQLExecutor.execute(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase009() { + // given String sql = "create table if not exists restful_test.weather(ts timestamp, temperature float) tags(location nchar(64))"; - SQLExecutor.execute(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } 
@Test public void testCase010() { + // given String sql = "create table t1 using restful_test.weather tags('北京')"; - SQLExecutor.execute(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase011() { + // given String sql = "insert into restful_test.t1 values(now, 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase012() { + // given String sql = "insert into restful_test.t1 values('2020-01-01 00:00:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase013() { + // given String sql = "insert into restful_test.t1 values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase014() { + // given String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:03:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase015() { + // given String sql = "insert into restful_test.t2 using weather tags('上海') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase016() { + // given String sql = "insert into t1 values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t2 values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase017() { + // given String sql = "Insert into t3 using weather tags('广东') values('2020-01-01 01:0:00.000', 22.22),('2020-01-01 02:00:00.000', 22.22) t4 using weather tags('天津') values('2020-01-01 01:0:00.000', 33.33),('2020-01-01 02:00:00.000', 33.33)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + boolean execute = execute(connection, sql); + + // then + Assert.assertFalse(execute); } @Test public void testCase018() { + // given String sql = "select * from restful_test.t1"; - SQLExecutor.executeQuery(connection, sql); + // when + boolean execute = execute(connection, sql); + // then + Assert.assertTrue(execute); } @Test public void testCase019() { + // given String sql = "select * from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + boolean execute = execute(connection, sql); + // then + Assert.assertTrue(execute); } @Test public void testCase020() { + // given String sql = "select ts, temperature from restful_test.t1"; - SQLExecutor.executeQuery(connection, sql); + // when + boolean execute = execute(connection, sql); + // then + Assert.assertTrue(execute); } @Test public void testCase021() { + // given String sql = "select ts, temperature from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + boolean execute = execute(connection, sql); + // then + Assert.assertTrue(execute); } @Test public void 
testCase022() { + // given String sql = "select temperature, ts from restful_test.t1"; - SQLExecutor.executeQuery(connection, sql); + // when + boolean execute = execute(connection, sql); + // then + Assert.assertTrue(execute); } @Test public void testCase023() { + // given String sql = "select temperature, ts from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + boolean execute = execute(connection, sql); + // then + Assert.assertTrue(execute); } @Test public void testCase024() { + // given String sql = "import into restful_test.t5 using weather tags('石家庄') values('2020-01-01 00:01:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + // when + int affectedRows = executeUpdate(connection, sql); + // then + Assert.assertEquals(1, affectedRows); } @Test public void testCase025() { + // given String sql = "import into restful_test.t6 using weather tags('沈阳') values('2020-01-01 00:01:00.000', 22.22),('2020-01-01 00:02:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + // when + int affectedRows = executeUpdate(connection, sql); + // then + Assert.assertEquals(2, affectedRows); } @Test public void testCase026() { + // given String sql = "import into restful_test.t7 using weather tags('长沙') values('2020-01-01 00:01:00.000', 22.22) restful_test.t8 using weather tags('吉林') values('2020-01-01 00:01:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + + // when + int affectedRows = executeUpdate(connection, sql); + // then + Assert.assertEquals(2, affectedRows); } @Test public void testCase027() { + // given String sql = "import into restful_test.t9 using weather tags('武汉') values('2020-01-01 00:01:00.000', 22.22) ,('2020-01-02 00:01:00.000', 22.22) restful_test.t10 using weather tags('哈尔滨') values('2020-01-01 00:01:00.000', 22.22),('2020-01-02 00:01:00.000', 22.22)"; - SQLExecutor.executeUpdate(connection, sql); + // when + int affectedRows = executeUpdate(connection, sql); + // then + Assert.assertEquals(4, affectedRows); } @Test public void testCase028() { + // given String sql = "select location, temperature, ts from restful_test.weather where temperature > 1"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase029() { String sql = "select location, temperature, ts from restful_test.weather where temperature < 1"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase030() { String sql = "select location, temperature, ts from restful_test.weather where ts > now"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase031() { String sql = "select location, temperature, ts from restful_test.weather where ts < now"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase032() { String sql = "select count(*) from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase033() { String sql = "select first(*) from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then 
+ Assert.assertNotNull(rs); } @Test public void testCase034() { String sql = "select last(*) from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase035() { String sql = "select last_row(*) from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase036() { String sql = "select ts, ts as primary_key from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase037() { String sql = "select database()"; - SQLExecutor.execute(connection, "use restful_test"); - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase038() { String sql = "select client_version()"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase039() { String sql = "select server_status()"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase040() { String sql = "select server_status() as status"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase041() { String sql = "select tbname, location from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase042() { String sql = "select count(tbname) from restful_test.weather"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase043() { String sql = "select * from restful_test.weather where ts < now - 1h"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase044() { String sql = "select * from restful_test.weather where ts < now - 1h and location like '%'"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase045() { String sql = "select * from restful_test.weather where ts < now - 1h order by ts"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase046() { String sql = "select last(*) from restful_test.weather where ts < now - 1h group by tbname order by tbname"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase047() { String sql = "select * from restful_test.weather limit 2"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase048() { String sql = "select * from restful_test.weather limit 2 
offset 5"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase049() { String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts "; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase050() { String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase051() { String sql = "select * from restful_test.t1 tt, restful_test.t3 yy where tt.ts = yy.ts"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase052() { String sql = "select server_status()"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); } @Test public void testCase053() { String sql = "select avg(cpu_taosd), avg(cpu_system), max(cpu_cores), avg(mem_taosd), avg(mem_system), max(mem_total), avg(disk_used), max(disk_total), avg(band_speed), avg(io_read), avg(io_write), sum(req_http), sum(req_select), sum(req_insert) from log.dn1 where ts> now - 60m and ts<= now interval(1m) fill(value, 0)"; - SQLExecutor.executeQuery(connection, sql); + // when + ResultSet rs = executeQuery(connection, sql); + // then + Assert.assertNotNull(rs); + } + + private boolean execute(Connection connection, String sql) { + try (Statement statement = connection.createStatement()) { + return statement.execute(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + return false; + } + + private ResultSet executeQuery(Connection connection, String sql) { + try (Statement statement = connection.createStatement()) { + return statement.executeQuery(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + return null; + } + + private int executeUpdate(Connection connection, String sql) { + try (Statement statement = connection.createStatement()) { + return statement.executeUpdate(sql); + } catch (SQLException e) { + e.printStackTrace(); + } + return 0; } @BeforeClass - public static void before() throws ClassNotFoundException, SQLException { - Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + public static void before() throws SQLException { connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata"); } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java index fd6c83ad1cacfb351bcc1d35a5cf777213ad876d..269a19e7a031560cfa1c7c33af77ae1c0a6ff268 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java @@ -10,17 +10,17 @@ public class OSUtilsTest { @Test public void inWindows() { - Assert.assertEquals(OS.indexOf("win") >= 0, OSUtils.isWindows()); + Assert.assertEquals(OS.contains("win"), OSUtils.isWindows()); } @Test public void isMac() { - Assert.assertEquals(OS.indexOf("mac") >= 0, OSUtils.isMac()); + Assert.assertEquals(OS.contains("mac"), OSUtils.isMac()); } @Test public void isLinux() { - 
Assert.assertEquals(OS.indexOf("nux") >= 0, OSUtils.isLinux()); + Assert.assertEquals(OS.contains("nux"), OSUtils.isLinux()); } @Before diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SQLExecutor.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SQLExecutor.java deleted file mode 100644 index bf034bf458bcb8eadaaacb5cf633f0905a8c1bd6..0000000000000000000000000000000000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SQLExecutor.java +++ /dev/null @@ -1,74 +0,0 @@ -package com.taosdata.jdbc.utils; - -import java.sql.*; - -public class SQLExecutor { - - // insert, import - public static void executeUpdate(Connection connection, String sql) { - try (Statement statement = connection.createStatement()) { - long start = System.currentTimeMillis(); - int affectedRows = statement.executeUpdate(sql); - long end = System.currentTimeMillis(); - System.out.println("[ affected rows : " + affectedRows + " ] time cost: " + (end - start) + " ms, execute statement ====> " + sql); - } catch (SQLException e) { - e.printStackTrace(); - } - } - - // show databases, show tables, show stables - public static void executeWithResult(Connection connection, String sql) { - try (Statement statement = connection.createStatement()) { - statement.execute(sql); - ResultSet resultSet = statement.getResultSet(); - printResult(resultSet); - } catch (SQLException e) { - e.printStackTrace(); - } - } - - // use database, create database, create table, drop table... - public static void execute(Connection connection, String sql) { - try (Statement statement = connection.createStatement()) { - long start = System.currentTimeMillis(); - boolean execute = statement.execute(sql); - long end = System.currentTimeMillis(); - printSql(sql, execute, (end - start)); - } catch (SQLException e) { - System.out.println("ERROR execute SQL ===> " + sql); - e.printStackTrace(); - } - } - - // select - public static void executeQuery(Connection connection, String sql) { - try (Statement statement = connection.createStatement()) { - long start = System.currentTimeMillis(); - ResultSet resultSet = statement.executeQuery(sql); - long end = System.currentTimeMillis(); - printSql(sql, true, (end - start)); - printResult(resultSet); - } catch (SQLException e) { - System.out.println("ERROR execute SQL ===> " + sql); - e.printStackTrace(); - } - } - - private static void printSql(String sql, boolean succeed, long cost) { - System.out.println("[ " + (succeed ? 
"OK" : "ERROR!") + " ] time cost: " + cost + " ms, execute statement ====> " + sql); - } - - private static void printResult(ResultSet resultSet) throws SQLException { - ResultSetMetaData metaData = resultSet.getMetaData(); - while (resultSet.next()) { - StringBuilder sb = new StringBuilder(); - for (int i = 1; i <= metaData.getColumnCount(); i++) { - String columnLabel = metaData.getColumnLabel(i); - String value = resultSet.getString(i); - sb.append(columnLabel + ": " + value + "\t"); - } - System.out.println(sb.toString()); - } - } - -} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/TimestampUtil.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/TimestampUtil.java index 16f8269d246f094558bbe7f6b11ac4bd90eb888f..eaebd92d8e8d9ef4022e9d5e9cb5a5bfceac878c 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/TimestampUtil.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/TimestampUtil.java @@ -21,47 +21,4 @@ public class TimestampUtil { SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); return sdf.format(new Date(time)); } - - public static class TimeTuple { - public Long start; - public Long end; - public Long timeGap; - - TimeTuple(long start, long end, long timeGap) { - this.start = start; - this.end = end; - this.timeGap = timeGap; - } - } - - public static TimeTuple range(long start, long timeGap, long size) { - long now = System.currentTimeMillis(); - if (timeGap < 1) - timeGap = 1; - if (start == 0) - start = now - size * timeGap; - - // 如果size小于1异常 - if (size < 1) - throw new IllegalArgumentException("size less than 1."); - // 如果timeGap为1,已经超长,需要前移start - if (start + size > now) { - start = now - size; - return new TimeTuple(start, now, 1); - } - long end = start + (long) (timeGap * size); - if (end > now) { - //压缩timeGap - end = now; - double gap = (end - start) / (size * 1.0f); - if (gap < 1.0f) { - timeGap = 1; - start = end - size; - } else { - timeGap = (long) gap; - end = start + (long) (timeGap * size); - } - } - return new TimeTuple(start, end, timeGap); - } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java index c861ef296607b244e4564423ad4024ea1f13df5d..1cbd95b2492284b9c85f31bd6b6848d9c223df18 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java @@ -3,22 +3,138 @@ package com.taosdata.jdbc.utils; import org.junit.Assert; import org.junit.Test; -import static org.junit.Assert.*; +import java.util.stream.Stream; public class UtilsTest { @Test public void escapeSingleQuota() { + // given String s = "'''''a\\'"; + // when String news = Utils.escapeSingleQuota(s); + // then Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news); - s = "\'''''a\\'"; + // given + s = "'''''a\\'"; + // when news = Utils.escapeSingleQuota(s); + // then Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news); - s = "\'\'\'\''a\\'"; + // given + s = "'''''a\\'"; + // when news = Utils.escapeSingleQuota(s); + // then Assert.assertEquals("\\'\\'\\'\\'\\'a\\'", news); } + + @Test + public void lowerCase() { + // given + String nativeSql = "insert into ?.? (ts, temperature, humidity) using ?.? tags(?,?) 
values(now, ?, ?)"; + Object[] parameters = Stream.of("test", "t1", "test", "weather", "beijing", 1, 12.2, 4).toArray(); + + // when + String actual = Utils.getNativeSql(nativeSql, parameters); + + // then + String expected = "insert into test.t1 (ts, temperature, humidity) using test.weather tags('beijing',1) values(now, 12.2, 4)"; + Assert.assertEquals(expected, actual); + } + + @Test + public void upperCase() { + // given + String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?)"; + Object[] parameters = Stream.of("d1", 1, 123, 3.14, 220, 4).toArray(); + + // when + String actual = Utils.getNativeSql(nativeSql, parameters); + + // then + String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (123,3.14,220,4)"; + Assert.assertEquals(expected, actual); + } + + @Test + public void multiValues() { + // given + String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?),(?,?,?,?)"; + Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5).toArray(); + + // when + String actual = Utils.getNativeSql(nativeSql, parameters); + + // then + String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4),(200,3.1415,'xyz',5)"; + Assert.assertEquals(expected, actual); + } + + @Test + public void lineTerminator() { + // given + String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,\r\n?,?),(?,?,?,?)"; + Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5).toArray(); + + // when + String actual = Utils.getNativeSql(nativeSql, parameters); + + // then + String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,\r\n'abc',4),(200,3.1415,'xyz',5)"; + Assert.assertEquals(expected, actual); + } + + @Test + public void lineTerminatorAndMultiValues() { + String nativeSql = "INSERT Into ? TAGS(?) VALUES(?,?,\r\n?,?),(?,? ,\r\n?,?) t? tags (?) Values (?,?,?\r\n,?),(?,?,?,?) t? Tags(?) values (?,?,?,?) , (?,?,?,?)"; + Object[] parameters = Stream.of("t1", "abc", 100, 1.1, "xxx", "xxx", 200, 2.2, "xxx", "xxx", 2, "bcd", 300, 3.3, "xxx", "xxx", 400, 4.4, "xxx", "xxx", 3, "cde", 500, 5.5, "xxx", "xxx", 600, 6.6, "xxx", "xxx").toArray(); + + // when + String actual = Utils.getNativeSql(nativeSql, parameters); + + // then + String expected = "INSERT Into t1 TAGS('abc') VALUES(100,1.1,\r\n'xxx','xxx'),(200,2.2 ,\r\n'xxx','xxx') t2 tags ('bcd') Values (300,3.3,'xxx'\r\n,'xxx'),(400,4.4,'xxx','xxx') t3 Tags('cde') values (500,5.5,'xxx','xxx') , (600,6.6,'xxx','xxx')"; + Assert.assertEquals(expected, actual); + } + + @Test + public void replaceNothing() { + // given + String nativeSql = "insert into test.t1 (ts, temperature, humidity) using test.weather tags('beijing',1) values(now, 12.2, 4)"; + + // when + String actual = Utils.getNativeSql(nativeSql, null); + + // then + Assert.assertEquals(nativeSql, actual); + } + + @Test + public void replaceNothing2() { + // given + String nativeSql = "insert into test.t1 (ts, temperature, humidity) using test.weather tags('beijing',1) values(now, 12.2, 4)"; + Object[] parameters = Stream.of("test", "t1", "test", "weather", "beijing", 1, 12.2, 4).toArray(); + + // when + String actual = Utils.getNativeSql(nativeSql, parameters); + + // then + Assert.assertEquals(nativeSql, actual); + } + + @Test + public void replaceNothing3() { + // given + String nativeSql = "insert into ?.? 
(ts, temperature, humidity) using ?.? tags(?,?) values(now, ?, ?)"; + + // when + String actual = Utils.getNativeSql(nativeSql, null); + + // then + Assert.assertEquals(nativeSql, actual); + + } } \ No newline at end of file diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index f3961e3787c4fb6d7da7092b68632d08a8b57e20..03d27e5593ccb15d8ff47cd3c3dedba765d14fc1 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -15,36 +15,18 @@ const { NULL_POINTER } = require('ref-napi'); module.exports = CTaosInterface; -function convertMillisecondsToDatetime(time) { - return new TaosObjects.TaosTimestamp(time); -} -function convertMicrosecondsToDatetime(time) { - return new TaosObjects.TaosTimestamp(time * 0.001, true); -} - -function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { - timestampConverter = convertMillisecondsToDatetime; - if (micro == true) { - timestampConverter = convertMicrosecondsToDatetime; - } +function convertTimestamp(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; while (currOffset < data.length) { - let queue = []; - let time = 0; - for (let i = currOffset; i < currOffset + nbytes; i++) { - queue.push(data[i]); - } - for (let i = queue.length - 1; i >= 0; i--) { - time += queue[i] * Math.pow(16, i * 2); - } + let time = data.readInt64LE(currOffset); currOffset += nbytes; - res.push(timestampConverter(time)); + res.push(new TaosObjects.TaosTimestamp(time, precision)); } return res; } -function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertBool(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = new Array(data.length); for (let i = 0; i < data.length; i++) { @@ -60,7 +42,7 @@ function convertBool(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { } return res; } -function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -71,7 +53,7 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, micro = false } return res; } -function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -82,7 +64,7 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, micro = fals } return res; } -function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -93,7 +75,7 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { } return res; } -function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -104,7 +86,7 @@ function 
convertBigint(data, num_of_rows, nbytes = 0, offset = 0, micro = false) } return res; } -function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -115,7 +97,7 @@ function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, micro = false) } return res; } -function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { +function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; let currOffset = 0; @@ -126,30 +108,20 @@ function convertDouble(data, num_of_rows, nbytes = 0, offset = 0, micro = false) } return res; } -function convertBinary(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { + +function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; + let currOffset = 0; while (currOffset < data.length) { - let dataEntry = data.slice(currOffset, currOffset + nbytes); - if (dataEntry[0] == FieldTypes.C_BINARY_NULL) { - res.push(null); - } - else { - res.push(ref.readCString(dataEntry)); - } + let len = data.readIntLE(currOffset, 2); + let dataEntry = data.slice(currOffset + 2, currOffset + len + 2); //one entry in a row under a column; + res.push(dataEntry.toString("utf-8")); currOffset += nbytes; } return res; } -function convertNchar(data, num_of_rows, nbytes = 0, offset = 0, micro = false) { - data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); - let res = []; - let dataEntry = data.slice(0, nbytes); //one entry in a row under a column; - //TODO: should use the correct character encoding - res.push(dataEntry.toString("utf-8")); - return res; -} // Object with all the relevant converters from pblock data to javascript readable data let convertFunctions = { @@ -160,7 +132,7 @@ let convertFunctions = { [FieldTypes.C_BIGINT]: convertBigint, [FieldTypes.C_FLOAT]: convertFloat, [FieldTypes.C_DOUBLE]: convertDouble, - [FieldTypes.C_BINARY]: convertBinary, + [FieldTypes.C_BINARY]: convertNchar, [FieldTypes.C_TIMESTAMP]: convertTimestamp, [FieldTypes.C_NCHAR]: convertNchar } @@ -282,7 +254,7 @@ CTaosInterface.prototype.config = function config() { CTaosInterface.prototype.connect = function connect(host = null, user = "root", password = "taosdata", db = null, port = 0) { let _host, _user, _password, _db, _port; try { - _host = host != null ? ref.allocCString(host) : ref.alloc(ref.types.char_ptr, ref.NULL); + _host = host != null ? ref.allocCString(host) : ref.NULL; } catch (err) { throw "Attribute Error: host is expected as a str"; @@ -300,7 +272,7 @@ CTaosInterface.prototype.connect = function connect(host = null, user = "root", throw "Attribute Error: password is expected as a str"; } try { - _db = db != null ? ref.allocCString(db) : ref.alloc(ref.types.char_ptr, ref.NULL); + _db = db != null ? 
ref.allocCString(db) : ref.NULL; } catch (err) { throw "Attribute Error: db is expected as a str"; @@ -355,8 +327,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { } var fieldL = this.libtaos.taos_fetch_lengths(result); - - let isMicro = (this.libtaos.taos_result_precision(result) == FieldTypes.C_TIMESTAMP_MICRO); + let precision = this.libtaos.taos_result_precision(result); var fieldlens = []; @@ -383,7 +354,7 @@ CTaosInterface.prototype.fetchBlock = function fetchBlock(result, fields) { if (!convertFunctions[fields[i]['type']]) { throw new errors.DatabaseError("Invalid data type returned from database"); } - blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, isMicro); + blocks[i] = convertFunctions[fields[i]['type']](pdata, num_of_rows, fieldlens[i], offset, precision); } } return { blocks: blocks, num_of_rows } @@ -433,7 +404,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, let row = cti.libtaos.taos_fetch_row(result2); let fields = cti.fetchFields_a(result2); - let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO); + let precision = cti.libtaos.taos_result_precision(result2); let blocks = new Array(fields.length); blocks.fill(null); numOfRows2 = Math.abs(numOfRows2); @@ -459,7 +430,7 @@ CTaosInterface.prototype.fetch_rows_a = function fetch_rows_a(result, callback, let prow = ref.reinterpret(row, 8, i * 8); prow = prow.readPointer(); prow = ref.ref(prow); - blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, isMicro); + blocks[i] = convertFunctions[fields[i]['type']](prow, 1, fieldlens[i], offset, precision); //offset += fields[i]['bytes'] * numOfRows2; } } @@ -582,7 +553,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb var cti = this; let asyncCallbackWrapper = function (param2, result2, row) { let fields = cti.fetchFields_a(result2); - let isMicro = (cti.libtaos.taos_result_precision(result2) == FieldTypes.C_TIMESTAMP_MICRO); + let precision = cti.libtaos.taos_result_precision(result2); let blocks = new Array(fields.length); blocks.fill(null); let numOfRows2 = 1; @@ -592,7 +563,7 @@ CTaosInterface.prototype.openStream = function openStream(connection, sql, callb if (!convertFunctions[fields[i]['type']]) { throw new errors.DatabaseError("Invalid data type returned from database"); } - blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, isMicro); + blocks[i] = convertFunctions[fields[i]['type']](row, numOfRows2, fields[i]['bytes'], offset, precision); offset += fields[i]['bytes'] * numOfRows2; } } diff --git a/src/connector/nodejs/nodetaos/taosobjects.js b/src/connector/nodejs/nodetaos/taosobjects.js index 809d17a016ac5aafc504c71f6417858e9d00821b..0fc8dc8ef1a057c7e410956a2b68072e65cbb613 100644 --- a/src/connector/nodejs/nodetaos/taosobjects.js +++ b/src/connector/nodejs/nodetaos/taosobjects.js @@ -1,5 +1,5 @@ const FieldTypes = require('./constants'); - +const util = require('util'); /** * Various objects such as TaosRow and TaosColumn that help make parsing data easier * @module TaosObjects @@ -14,7 +14,7 @@ const FieldTypes = require('./constants'); * var trow = new TaosRow(row); * console.log(trow.data); */ -function TaosRow (row) { +function TaosRow(row) { this.data = row; this.length = row.length; return this; @@ -29,10 +29,10 @@ function TaosRow (row) { */ function TaosField(field) { - this._field = field; - this.name = field.name; - 
this.type = FieldTypes.getType(field.type); - return this; + this._field = field; + this.name = field.name; + this.type = FieldTypes.getType(field.type); + return this; } /** @@ -42,39 +42,110 @@ function TaosField(field) { * @param {Date} date - A Javascript date time object or the time in milliseconds past 1970-1-1 00:00:00.000 */ class TaosTimestamp extends Date { - constructor(date, micro = false) { - super(date); - this._type = 'TaosTimestamp'; - if (micro) { - this.microTime = date - Math.floor(date); + constructor(date, precision = 0) { + if (precision === 1) { + super(Math.floor(date / 1000)); + this.precisionExtras = date % 1000; + } else if (precision === 2) { + super(parseInt(date / 1000000)); + // use BigInt to fix: 1625801548423914405 % 1000000 = 914496 which not expected (914405) + this.precisionExtras = parseInt(BigInt(date) % 1000000n); + } else { + super(parseInt(date)); + } + this.precision = precision; + } + + /** + * TDengine raw timestamp. + * @returns raw taos timestamp (int64) + */ + taosTimestamp() { + if (this.precision == 1) { + return (this * 1000 + this.precisionExtras); + } else if (this.precision == 2) { + return (this * 1000000 + this.precisionExtras); + } else { + return Math.floor(this); + } + } + + /** + * Gets the microseconds of a Date. + * @return {Int} A microseconds integer + */ + getMicroseconds() { + if (this.precision == 1) { + return this.getMilliseconds() * 1000 + this.precisionExtras; + } else if (this.precision == 2) { + return this.getMilliseconds() * 1000 + this.precisionExtras / 1000; + } else { + return 0; + } + } + /** + * Gets the nanoseconds of a TaosTimestamp. + * @return {Int} A nanoseconds integer + */ + getNanoseconds() { + if (this.precision == 1) { + return this.getMilliseconds() * 1000000 + this.precisionExtras * 1000; + } else if (this.precision == 2) { + return this.getMilliseconds() * 1000000 + this.precisionExtras; + } else { + return 0; + } + } + + /** + * @returns {String} a string for timestamp string format + */ + _precisionExtra() { + if (this.precision == 1) { + return String(this.precisionExtras).padStart(3, '0'); + } else if (this.precision == 2) { + return String(this.precisionExtras).padStart(6, '0'); + } else { + return ''; } } /** * @function Returns the date into a string usable by TDengine * @return {string} A Taos Timestamp String */ - toTaosString(){ + toTaosString() { var tzo = -this.getTimezoneOffset(), - dif = tzo >= 0 ? '+' : '-', - pad = function(num) { - var norm = Math.floor(Math.abs(num)); - return (norm < 10 ? '0' : '') + norm; - }, - pad2 = function(num) { - var norm = Math.floor(Math.abs(num)); - if (norm < 10) return '00' + norm; - if (norm < 100) return '0' + norm; - if (norm < 1000) return norm; - }; + dif = tzo >= 0 ? '+' : '-', + pad = function (num) { + var norm = Math.floor(Math.abs(num)); + return (norm < 10 ? '0' : '') + norm; + }, + pad2 = function (num) { + var norm = Math.floor(Math.abs(num)); + if (norm < 10) return '00' + norm; + if (norm < 100) return '0' + norm; + if (norm < 1000) return norm; + }; return this.getFullYear() + - '-' + pad(this.getMonth() + 1) + - '-' + pad(this.getDate()) + - ' ' + pad(this.getHours()) + - ':' + pad(this.getMinutes()) + - ':' + pad(this.getSeconds()) + - '.' + pad2(this.getMilliseconds()) + - '' + (this.microTime ? pad2(Math.round(this.microTime * 1000)) : ''); + '-' + pad(this.getMonth() + 1) + + '-' + pad(this.getDate()) + + ' ' + pad(this.getHours()) + + ':' + pad(this.getMinutes()) + + ':' + pad(this.getSeconds()) + + '.' 
+ pad2(this.getMilliseconds()) + + '' + this._precisionExtra(); + } + + /** + * Custom console.log + * @returns {String} string format for debug + */ + [util.inspect.custom](depth, opts) { + return this.toTaosString() + JSON.stringify({ precision: this.precision, precisionExtras: this.precisionExtras }, opts); + } + toString() { + return this.toTaosString(); } } -module.exports = {TaosRow, TaosField, TaosTimestamp} +module.exports = { TaosRow, TaosField, TaosTimestamp } diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index d21b62108b14e5a132ad5457d190bbcbc58b73a8..db37318a164c6207432ebb64defb608381d2cb49 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,13 +1,13 @@ { "name": "td2.0-connector", - "version": "2.0.7", + "version": "2.0.9", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "directories": { "test": "test" }, "scripts": { - "test": "node test/test.js" + "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js" }, "repository": { "type": "git", diff --git a/src/connector/nodejs/tdengine.js b/src/connector/nodejs/tdengine.js index aa296279d5e20f3d049d478ea2af44ea47a2b8e3..047c744a4fc90c6306e851eaa529a7f9f578fe12 100644 --- a/src/connector/nodejs/tdengine.js +++ b/src/connector/nodejs/tdengine.js @@ -1,4 +1,4 @@ var TDengineConnection = require('./nodetaos/connection.js') -module.exports.connect = function (connection=null) { +module.exports.connect = function (connection={}) { return new TDengineConnection(connection); } diff --git a/src/connector/nodejs/test/test.js b/src/connector/nodejs/test/test.js index bf4bb2c54188d3eb0f9c7fb5306912effc7b0760..caf05955da4c960ebedc872f400c17d18be767dd 100644 --- a/src/connector/nodejs/test/test.js +++ b/src/connector/nodejs/test/test.js @@ -1,5 +1,5 @@ const taos = require('../tdengine'); -var conn = taos.connect({host:"127.0.0.1", user:"root", password:"taosdata", config:"/etc/taos",port:10}); +var conn = taos.connect(); var c1 = conn.cursor(); let stime = new Date(); let interval = 1000; diff --git a/src/connector/nodejs/test/testMicroseconds.js b/src/connector/nodejs/test/testMicroseconds.js new file mode 100644 index 0000000000000000000000000000000000000000..cc65b3d919f92b3b4d7e0e216c6c8ac64a294d7f --- /dev/null +++ b/src/connector/nodejs/test/testMicroseconds.js @@ -0,0 +1,49 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} +function R(l, r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +const dbname = 'nodejs_test_us'; +c1.execute('create database if not exists ' + dbname + ' precision "us"'); +c1.execute('use ' + dbname) +c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +c1.execute('insert into tstest values(1625801548423914, 0)'); +// Select +console.log('select * from tstest'); +c1.execute('select * from tstest'); + +var d = c1.fetchall(); +console.log(c1.fields); +let ts = d[0][0]; +console.log(ts); + +if (ts.taosTimestamp() != 1625801548423914) { + throw "microseconds not match!"; +} +if (ts.getMicroseconds() % 1000 !== 914) { + throw "microsecond precision 
error"; +} +setTimeout(function () { + c1.query('drop database ' + dbname + ';'); +}, 200); + +setTimeout(function () { + conn.close(); +}, 2000); diff --git a/src/connector/nodejs/test/testNanoseconds.js b/src/connector/nodejs/test/testNanoseconds.js new file mode 100644 index 0000000000000000000000000000000000000000..85a7600b01f2c908f22e621488f22678083149ea --- /dev/null +++ b/src/connector/nodejs/test/testNanoseconds.js @@ -0,0 +1,49 @@ +const taos = require('../tdengine'); +var conn = taos.connect(); +var c1 = conn.cursor(); +let stime = new Date(); +let interval = 1000; + +function convertDateToTS(date) { + let tsArr = date.toISOString().split("T") + return "\"" + tsArr[0] + " " + tsArr[1].substring(0, tsArr[1].length - 1) + "\""; +} +function R(l, r) { + return Math.random() * (r - l) - r; +} +function randomBool() { + if (Math.random() < 0.5) { + return true; + } + return false; +} + +// Initialize +//c1.execute('drop database td_connector_test;'); +const dbname = 'nodejs_test_ns'; +c1.execute('create database if not exists ' + dbname + ' precision "ns"'); +c1.execute('use ' + dbname) +c1.execute('create table if not exists tstest (ts timestamp, _int int);'); +c1.execute('insert into tstest values(1625801548423914405, 0)'); +// Select +console.log('select * from tstest'); +c1.execute('select * from tstest'); + +var d = c1.fetchall(); +console.log(c1.fields); +let ts = d[0][0]; +console.log(ts); + +if (ts.taosTimestamp() != 1625801548423914405) { + throw "nanosecond not match!"; +} +if (ts.getNanoseconds() % 1000000 !== 914405) { + throw "nanosecond precision error"; +} +setTimeout(function () { + c1.query('drop database ' + dbname + ';'); +}, 200); + +setTimeout(function () { + conn.close(); +}, 2000); diff --git a/src/connector/odbc/CMakeLists.txt b/src/connector/odbc/CMakeLists.txt index 5a93ac3f7e2934fd8383c5a18f22c24845793f1a..87746f23ae3796f4d0ab20257f90599860430568 100644 --- a/src/connector/odbc/CMakeLists.txt +++ b/src/connector/odbc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_LINUX_64) @@ -20,8 +20,8 @@ IF (TD_LINUX_64) if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0.0) message(WARNING "gcc 4.8.0 will complain too much about flex-generated code, we just bypass building ODBC driver in such case") else () - SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion") - SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wconversion") + SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} ") + SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ") ADD_SUBDIRECTORY(src) ADD_SUBDIRECTORY(tools) ADD_SUBDIRECTORY(examples) diff --git a/src/connector/odbc/src/CMakeLists.txt b/src/connector/odbc/src/CMakeLists.txt index f0e50415e2e4f14e1c247b834e1e52a2c2fd2868..e990647e1aadcafb8b3306ee7e43a4d3ac285c94 100644 --- a/src/connector/odbc/src/CMakeLists.txt +++ b/src/connector/odbc/src/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) add_subdirectory(base) diff --git a/src/connector/odbc/src/base/CMakeLists.txt b/src/connector/odbc/src/base/CMakeLists.txt index fa13f3e07737bb6c2a36e5296a49a9f282346e3b..e34091360900a3a856d9fe56bb9fec994f4ba321 100644 --- a/src/connector/odbc/src/base/CMakeLists.txt +++ b/src/connector/odbc/src/base/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) aux_source_directory(. 
SRC) diff --git a/src/connector/python/README.md b/src/connector/python/README.md index 9151e9b8f0bdbabbaecf2ac34f830f4260b14326..a5dc2b72dafbeef0bf53fc1768f6afc66e714699 100644 --- a/src/connector/python/README.md +++ b/src/connector/python/README.md @@ -5,12 +5,13 @@ ## Install ```sh -pip install git+https://github.com/taosdata/TDengine-connector-python +git clone --depth 1 https://github.com/taosdata/TDengine.git +pip install ./TDengine/src/connector/python ``` ## Source Code -[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine-connector-python). +[TDengine] connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python). ## License - AGPL diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 61a2a0e9c70c3097a1376efb4bab6e250b4d4438..6d8ceb7a293ef71c7e8944772e6b8a6a0ed8e7a9 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -15,7 +15,7 @@ def _convert_microsecond_to_datetime(micro): def _convert_nanosecond_to_datetime(nanosec): - return datetime.datetime.fromtimestamp(nanosec / 1000000000.0) + return nanosec def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, precision=FieldType.C_TIMESTAMP_UNKNOWN): @@ -403,6 +403,20 @@ class CTaosInterface(object): """ return CTaosInterface.libtaos.taos_affected_rows(result) + @staticmethod + def insertLines(connection, lines): + ''' + insert through lines protocol + @lines: list of str + @rtype: tsdb error codes + ''' + numLines = len(lines) + c_lines_type = ctypes.c_char_p*numLines + c_lines = c_lines_type() + for i in range(numLines): + c_lines[i] = ctypes.c_char_p(lines[i].encode('utf-8')) + return CTaosInterface.libtaos.taos_insert_lines(connection, c_lines, ctypes.c_int(numLines)) + @staticmethod def subscribe(connection, restart, topic, sql, interval): """Create a subscription diff --git a/src/connector/python/taos/connection.py b/src/connector/python/taos/connection.py index f6c395342c9c39a24bda6022f0ed36cb7bfe045b..88d06cd7186018788aeb25c982fc205441193cb8 100644 --- a/src/connector/python/taos/connection.py +++ b/src/connector/python/taos/connection.py @@ -66,6 +66,14 @@ class TDengineConnection(object): self._conn, restart, topic, sql, interval) return TDengineSubscription(sub) + def insertLines(self, lines): + """ + insert lines through line protocol + """ + if self._conn is None: + return None + return CTaosInterface.insertLines(self._conn, lines) + def cursor(self): """Return a new Cursor object using the connection. 
""" diff --git a/src/cq/CMakeLists.txt b/src/cq/CMakeLists.txt index e9ed2996c74e2c59d56245e6fc1e932ebb07dfb0..f01ccb8728eb9a2a4695a8a0c133422e3134b8e2 100644 --- a/src/cq/CMakeLists.txt +++ b/src/cq/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index f539e7725315d2358767624ce74a8e9609a0b425..aac5a1c665c1417069b3978d10c7b1406a6b02a4 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -476,21 +476,23 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr); - int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize; + int32_t size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_MEM_ROW_DATA_HEAD_SIZE + pObj->rowSize; char *buffer = calloc(size, 1); SWalHead *pHead = (SWalHead *)buffer; SSubmitMsg *pMsg = (SSubmitMsg *) (buffer + sizeof(SWalHead)); SSubmitBlk *pBlk = (SSubmitBlk *) (buffer + sizeof(SWalHead) + sizeof(SSubmitMsg)); - SDataRow trow = (SDataRow)pBlk->data; - tdInitDataRow(trow, pSchema); + SMemRow trow = (SMemRow)pBlk->data; + SDataRow dataRow = (SDataRow)memRowDataBody(trow); + memRowSetType(trow, SMEM_ROW_DATA); + tdInitDataRow(dataRow, pSchema); for (int32_t i = 0; i < pSchema->numOfCols; i++) { STColumn *c = pSchema->columns + i; - void* val = row[i]; + void *val = row[i]; if (val == NULL) { - val = getNullValue(c->type); + val = (void *)getNullValue(c->type); } else if (c->type == TSDB_DATA_TYPE_BINARY) { val = ((char*)val) - sizeof(VarDataLenT); } else if (c->type == TSDB_DATA_TYPE_NCHAR) { @@ -500,9 +502,9 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { memcpy((char *)val + sizeof(VarDataLenT), buf, len); varDataLen(val) = len; } - tdAppendColVal(trow, val, c->type, c->bytes, c->offset); + tdAppendColVal(dataRow, val, c->type, c->offset); } - pBlk->dataLen = htonl(dataRowLen(trow)); + pBlk->dataLen = htonl(memRowDataTLen(trow)); pBlk->schemaLen = 0; pBlk->uid = htobe64(pObj->uid); @@ -511,7 +513,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) { pBlk->sversion = htonl(pSchema->version); pBlk->padding = 0; - pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + dataRowLen(trow); + pHead->len = sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + memRowDataTLen(trow); pMsg->header.vgId = htonl(pContext->vgId); pMsg->header.contLen = htonl(pHead->len); diff --git a/src/cq/test/CMakeLists.txt b/src/cq/test/CMakeLists.txt index cd124567afd8766173cf07e7a6191ab473be1714..d713dd7401c4f2d791ee0b4de1216b6ede558507 100644 --- a/src/cq/test/CMakeLists.txt +++ b/src/cq/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) LIST(APPEND CQTEST_SRC ./cqtest.c) diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index f8d8f88438429f1c0be405825cef4ab9c1b130bc..47186130ead0d1ee3f4593b7ef346f8cc47f7cba 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) @@ -18,7 +18,7 @@ ELSE () ENDIF () ADD_EXECUTABLE(taosd ${SRC}) -TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4 balance sync 
${LINK_JEMALLOC}) +TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lua lz4 balance sync ${LINK_JEMALLOC}) IF (TD_SOMODE_STATIC) TARGET_LINK_LIBRARIES(taosd taos_static) diff --git a/src/dnode/inc/dnodeCfg.h b/src/dnode/inc/dnodeCfg.h index 896b3f574c2e0d02a0d62048a411fa484d16130b..99733e46ef10ca7c17c1f49b344414ce7f564574 100644 --- a/src/dnode/inc/dnodeCfg.h +++ b/src/dnode/inc/dnodeCfg.h @@ -27,6 +27,7 @@ void dnodeUpdateCfg(SDnodeCfg *cfg); int32_t dnodeGetDnodeId(); void dnodeGetClusterId(char *clusterId); void dnodeGetCfg(int32_t *dnodeId, char *clusterId); +void dnodeSetDropped(); #ifdef __cplusplus } diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c index 586adacc98712e63aa8d83367cb5d2ec5f9157ec..4269c77bf33eb9aa5fc0edafde39f9b4772d53d9 100644 --- a/src/dnode/src/dnodeCfg.c +++ b/src/dnode/src/dnodeCfg.c @@ -21,6 +21,7 @@ static SDnodeCfg tsCfg = {0}; static pthread_mutex_t tsCfgMutex; +static int32_t tsDnodeDropped; static int32_t dnodeReadCfg(); static int32_t dnodeWriteCfg(); @@ -34,6 +35,10 @@ int32_t dnodeInitCfg() { if (ret == 0) { dInfo("dnode cfg is initialized"); } + if (tsDnodeDropped) { + dInfo("dnode is dropped, exiting"); + return -1; + } return ret; } @@ -44,6 +49,14 @@ void dnodeUpdateCfg(SDnodeCfg *cfg) { dnodeResetCfg(cfg); } +void dnodeSetDropped() { + pthread_mutex_lock(&tsCfgMutex); + tsDnodeDropped = 1; + + dnodeWriteCfg(); + pthread_mutex_unlock(&tsCfgMutex); +} + int32_t dnodeGetDnodeId() { int32_t dnodeId = 0; pthread_mutex_lock(&tsCfgMutex); @@ -119,6 +132,14 @@ static int32_t dnodeReadCfg() { } cfg.dnodeId = (int32_t)dnodeId->valueint; + cJSON *dnodeDropped = cJSON_GetObjectItem(root, "dnodeDropped"); + if (!dnodeDropped || dnodeDropped->type != cJSON_Number) { + dError("failed to read %s, dnodeDropped not found", file); + //goto PARSE_CFG_OVER; + } else { + tsDnodeDropped = (int32_t)dnodeDropped->valueint; + } + cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId"); if (!clusterId || clusterId->type != cJSON_String) { dError("failed to read %s, clusterId not found", file); @@ -154,6 +175,7 @@ static int32_t dnodeWriteCfg() { len += snprintf(content + len, maxLen - len, "{\n"); len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsCfg.dnodeId); + len += snprintf(content + len, maxLen - len, " \"dnodeDropped\": %d,\n", tsDnodeDropped); len += snprintf(content + len, maxLen - len, " \"clusterId\": \"%s\"\n", tsCfg.clusterId); len += snprintf(content + len, maxLen - len, "}\n"); diff --git a/src/dnode/src/dnodeMPeer.c b/src/dnode/src/dnodeMPeer.c index e4942c49aaba48db8d2a1c74a0af532769de9553..8aa28d1618efe871de7795b0d403ee060cf5941c 100644 --- a/src/dnode/src/dnodeMPeer.c +++ b/src/dnode/src/dnodeMPeer.c @@ -150,6 +150,8 @@ static void *dnodeProcessMPeerQueue(void *param) { SMnodeMsg *pPeerMsg; int32_t type; void * unUsed; + + setThreadName("dnodeMPeerQ"); while (1) { if (taosReadQitemFromQset(tsMPeerQset, &type, (void **)&pPeerMsg, &unUsed) == 0) { diff --git a/src/dnode/src/dnodeMRead.c b/src/dnode/src/dnodeMRead.c index 90332e6783bc4861928833d5794f0787f80be993..184a6b743afdd5f4284a5acffc8d518356be4ee4 100644 --- a/src/dnode/src/dnodeMRead.c +++ b/src/dnode/src/dnodeMRead.c @@ -155,6 +155,8 @@ static void *dnodeProcessMReadQueue(void *param) { int32_t type; void * unUsed; + setThreadName("dnodeMReadQ"); + while (1) { if (taosReadQitemFromQset(tsMReadQset, &type, (void **)&pRead, &unUsed) == 0) { dDebug("qset:%p, mnode read got no message from qset, exiting", tsMReadQset); diff --git 
a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index a409d537fa8a56f03ed79d68358ac70b780e74e9..904ddc21d019343fa3f679db4d25cd9b01e1d97b 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -168,7 +168,9 @@ static void *dnodeProcessMWriteQueue(void *param) { SMnodeMsg *pWrite; int32_t type; void * unUsed; - + + setThreadName("dnodeMWriteQ"); + while (1) { if (taosReadQitemFromQset(tsMWriteQset, &type, (void **)&pWrite, &unUsed) == 0) { dDebug("qset:%p, mnode write got no message from qset, exiting", tsMWriteQset); diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index cf633502c14ddc3f85f8e8f36059c5e835d402c0..eac04fe7bb2c53bd55dcbf8b6f5044d99007dc6c 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -41,6 +41,9 @@ #include "dnodeTelemetry.h" #include "module.h" #include "mnode.h" +#include "qScript.h" +#include "tcache.h" +#include "tscompression.h" #if !defined(_MODULE) || !defined(_TD_LINUX) int32_t moduleStart() { return 0; } @@ -84,6 +87,7 @@ static SStep tsDnodeSteps[] = { {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, {"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, + {"dnode-script", scriptEnvPoolInit, scriptEnvPoolCleanup}, }; static SStep tsDnodeCompactSteps[] = { @@ -205,6 +209,7 @@ void dnodeCleanUpSystem() { dnodeCleanupComponents(); taos_cleanup(); taosCloseLog(); + taosStopCacheRefreshWorker(); } } @@ -234,6 +239,12 @@ static void dnodeCheckDataDirOpenned(char *dir) { } static int32_t dnodeInitStorage() { +#ifdef TD_TSZ + // compress module init + tsCompressInit(); +#endif + + // storage module init if (tsDiskCfgNum == 1 && dnodeCreateDir(tsDataDir) < 0) { dError("failed to create dir: %s, reason: %s", tsDataDir, strerror(errno)); return -1; @@ -266,7 +277,7 @@ static int32_t dnodeInitStorage() { return -1; } } - //TODO(dengyihao): no need to init here + //TODO(dengyihao): no need to init here if (dnodeCreateDir(tsMnodeDir) < 0) { dError("failed to create dir: %s, reason: %s", tsMnodeDir, strerror(errno)); return -1; @@ -303,11 +314,21 @@ static int32_t dnodeInitStorage() { dnodeCheckDataDirOpenned(tsDnodeDir); + taosGetDisk(); + taosPrintDiskInfo(); dInfo("dnode storage is initialized at %s", tsDnodeDir); return 0; } -static void dnodeCleanupStorage() { tfsDestroy(); } +static void dnodeCleanupStorage() { + // storage destroy + tfsDestroy(); + + #ifdef TD_TSZ + // compress destroy + tsCompressExit(); + #endif +} bool dnodeIsFirstDeploy() { return strcmp(tsFirst, tsLocalEp) == 0; diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index c80e1059b40b08b9eb592dc150974bef9746a1a9..5606681f0f931070e9cbf21d6b98b0d2eb51bdfa 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -48,9 +48,11 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DNODE] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_FUNCTION] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_FUNCTION] = dnodeDispatchToMWriteQueue; 
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TABLE]= dnodeDispatchToMWriteQueue; @@ -72,6 +74,7 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_TABLES_META] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE_FUNC] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep; @@ -117,7 +120,14 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (pMsg->pCont == NULL) return; - if (dnodeGetRunStatus() != TSDB_RUN_STATUS_RUNING) { + SRunStatus dnodeStatus = dnodeGetRunStatus(); + if (dnodeStatus == TSDB_RUN_STATUS_STOPPED) { + dError("RPC %p, shell msg:%s is ignored since dnode exiting", pMsg->handle, taosMsg[pMsg->msgType]); + rpcMsg.code = TSDB_CODE_DND_EXITING; + rpcSendResponse(&rpcMsg); + rpcFreeCont(pMsg->pCont); + return; + } else if (dnodeStatus != TSDB_RUN_STATUS_RUNING) { dError("RPC %p, shell msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]); rpcMsg.code = TSDB_CODE_APP_NOT_READY; rpcSendResponse(&rpcMsg); diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index 4caece16612353155886ff20055433878ec7411c..59b66879d42e3133ec7143c95f31c9b5df8ddb60 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -245,6 +245,8 @@ static void* telemetryThread(void* param) { clock_gettime(CLOCK_REALTIME, &end); end.tv_sec += 300; // wait 5 minutes before send first report + setThreadName("telemetryThrd"); + while (!tsExit) { int r = 0; struct timespec ts = end; diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index 90bae8b9dd73ca80efab8090d5a189cf8c1a8966..c1bfb1460b4b3058434c628f503a4775c4c24701 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -103,6 +103,8 @@ static void *dnodeProcessMgmtQueue(void *wparam) { int32_t qtype; void * handle; + setThreadName("dnodeMgmtQ"); + while (1) { if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pMgmt, &handle) == 0) { dDebug("qdnode mgmt got no message from qset:%p, , exit", pPool->qset); @@ -170,7 +172,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { SAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg); - void *pVnode = vnodeAcquire(pAlter->cfg.vgId); + void *pVnode = vnodeAcquireNotClose(pAlter->cfg.vgId); if (pVnode != NULL) { dDebug("vgId:%d, alter vnode msg is received", pAlter->cfg.vgId); int32_t code = vnodeAlter(pVnode, pAlter); diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index ea738661ce2813e13468ad91b4dc1d54775db21f..e8003a8fe7996316d0b91689ea9738cbd24184b9 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -63,7 +63,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { pHead->contLen = htonl(pHead->contLen); assert(pHead->contLen > 0); - void *pVnode = vnodeAcquire(pHead->vgId); + void *pVnode = vnodeAcquireNotClose(pHead->vgId); if (pVnode != NULL) { code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg); if (code == TSDB_CODE_SUCCESS) queuedMsgNum++; @@ -118,6 +118,11 @@ static void *dnodeProcessReadQueue(void *wparam) { SVReadMsg * 
pRead; int32_t qtype; void * pVnode; + char name[16]; + + memset(name, 0, 16); + snprintf(name, 16, "%s-dnReadQ", pPool->name); + setThreadName(name); while (1) { if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) { diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 26084a52eb1806c4fdce592d47471d92ec3e1cdb..ed2a6e210939e907e89dd07de27481d385e4ef24 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -85,7 +85,7 @@ void dnodeDispatchToVWriteQueue(SRpcMsg *pRpcMsg) { pMsg->vgId = htonl(pMsg->vgId); pMsg->contLen = htonl(pMsg->contLen); - void *pVnode = vnodeAcquire(pMsg->vgId); + void *pVnode = vnodeAcquireNotClose(pMsg->vgId); if (pVnode == NULL) { code = TSDB_CODE_VND_INVALID_VGROUP_ID; } else { @@ -191,6 +191,8 @@ static void *dnodeProcessVWriteQueue(void *wparam) { taosBlockSIGPIPE(); dDebug("dnode vwrite worker:%d is running", pWorker->workerId); + setThreadName("dnodeWriteQ"); + while (1) { numOfMsgs = taosReadAllQitemsFromQset(pWorker->qset, pWorker->qall, &pVnode); if (numOfMsgs == 0) { @@ -202,12 +204,12 @@ static void *dnodeProcessVWriteQueue(void *wparam) { for (int32_t i = 0; i < numOfMsgs; ++i) { taosGetQitem(pWorker->qall, &qtype, (void **)&pWrite); dTrace("msg:%p, app:%p type:%s will be processed in vwrite queue, qtype:%s hver:%" PRIu64, pWrite, - pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version); + pWrite->rpcMsg.ahandle, taosMsg[pWrite->walHead.msgType], qtypeStr[qtype], pWrite->walHead.version); - pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite); + pWrite->code = vnodeProcessWrite(pVnode, &pWrite->walHead, qtype, pWrite); if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1); if (pWrite->code > 0) pWrite->code = 0; - if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; + if (pWrite->code == 0 && pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; dTrace("msg:%p is processed in vwrite queue, code:0x%x", pWrite, pWrite->code); } @@ -222,7 +224,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) { dnodeSendRpcVWriteRsp(pVnode, pWrite, pWrite->code); } else { if (qtype == TAOS_QTYPE_FWD) { - vnodeConfirmForward(pVnode, pWrite->pHead.version, pWrite->code, pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); + vnodeConfirmForward(pVnode, pWrite->walHead.version, pWrite->code, pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT); } if (pWrite->rspRet.rsp) { rpcFreeCont(pWrite->rspRet.rsp); diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index d96251cebe1cbbcddeb48f1a09091e1c09caf630..8ea8e280de10f0657ad0b937fb78794175d8c20a 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -91,6 +91,8 @@ static void *dnodeOpenVnode(void *param) { dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); + setThreadName("dnodeOpenVnode"); + for (int32_t v = 0; v < pThread->vnodeNum; ++v) { int32_t vgId = pThread->vnodeList[v]; snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to restore, %d of %d have been opened", vgId, tsOpenVnodes, tsTotalVnodes); @@ -202,6 +204,7 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { char clusterId[TSDB_CLUSTER_ID_LEN]; dnodeGetClusterId(clusterId); if (clusterId[0] != '\0') { + dnodeSetDropped(); dError("exit zombie dropped dnode"); exit(EXIT_FAILURE); } diff --git a/src/inc/query.h b/src/inc/query.h index 
38078cf21fcededd96dad833bbf1d22de55bb8ac..fb9cbff8584892b4a6bc6e4a6ce046a7500aef39 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -28,7 +28,7 @@ typedef void* qinfo_t; * @param qinfo * @return */ -int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, qinfo_t* qinfo, uint64_t *qId); +int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, qinfo_t* qinfo, uint64_t qId); /** diff --git a/src/inc/taos.h b/src/inc/taos.h index 9f72945ef03f28fb54ab05f84be810a0f9d5a66a..ba53c1ca8f57632d4270213be8eb7a7dbecd4dd2 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -72,6 +72,7 @@ DLL_EXPORT int taos_init(); DLL_EXPORT void taos_cleanup(void); DLL_EXPORT int taos_options(TSDB_OPTION option, const void *arg, ...); DLL_EXPORT TAOS *taos_connect(const char *ip, const char *user, const char *pass, const char *db, uint16_t port); +DLL_EXPORT TAOS *taos_connect_auth(const char *ip, const char *user, const char *auth, const char *db, uint16_t port); DLL_EXPORT void taos_close(TAOS *taos); const char *taos_data_type(int type); @@ -110,21 +111,23 @@ typedef struct TAOS_MULTI_BIND { } TAOS_MULTI_BIND; -TAOS_STMT *taos_stmt_init(TAOS *taos); -int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); -int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags); -int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name); -int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); -int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); + +DLL_EXPORT TAOS_STMT *taos_stmt_init(TAOS *taos); +DLL_EXPORT int taos_stmt_prepare(TAOS_STMT *stmt, const char *sql, unsigned long length); +DLL_EXPORT int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags); +DLL_EXPORT int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name); +DLL_EXPORT int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name); +DLL_EXPORT int taos_stmt_is_insert(TAOS_STMT *stmt, int *insert); +DLL_EXPORT int taos_stmt_num_params(TAOS_STMT *stmt, int *nums); int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes); -int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); +DLL_EXPORT int taos_stmt_bind_param(TAOS_STMT *stmt, TAOS_BIND *bind); int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind); int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int colIdx); -int taos_stmt_add_batch(TAOS_STMT *stmt); -int taos_stmt_execute(TAOS_STMT *stmt); -TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); -int taos_stmt_close(TAOS_STMT *stmt); -char * taos_stmt_errstr(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_add_batch(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_execute(TAOS_STMT *stmt); +DLL_EXPORT TAOS_RES * taos_stmt_use_result(TAOS_STMT *stmt); +DLL_EXPORT int taos_stmt_close(TAOS_STMT *stmt); +DLL_EXPORT char * taos_stmt_errstr(TAOS_STMT *stmt); DLL_EXPORT TAOS_RES *taos_query(TAOS *taos, const char *sql); DLL_EXPORT TAOS_ROW taos_fetch_row(TAOS_RES *res); @@ -139,10 +142,10 @@ DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int n DLL_EXPORT void taos_stop_query(TAOS_RES *res); DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); -int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); -int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); -int* taos_fetch_lengths(TAOS_RES *res); +DLL_EXPORT int* 
taos_fetch_lengths(TAOS_RES *res); // TAOS_RES *taos_list_tables(TAOS *mysql, const char *wild); // TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild); @@ -169,6 +172,8 @@ DLL_EXPORT void taos_close_stream(TAOS_STREAM *tstr); DLL_EXPORT int taos_load_table_info(TAOS *taos, const char* tableNameList); +DLL_EXPORT int taos_insert_lines(TAOS* taos, char* lines[], int numLines); + #ifdef __cplusplus } #endif diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index dac2dc84b6065d27514bbbbd2a836a81422408ad..ca8ad3cc09f99d06c1f0406bb7124a8aad6fef55 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -81,6 +81,8 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_DEFAULT_USER "root" #ifdef _TD_POWER_ #define TSDB_DEFAULT_PASS "powerdb" +#elif (_TD_TQ_ == true) +#define TSDB_DEFAULT_PASS "tqueue" #else #define TSDB_DEFAULT_PASS "taosdata" #endif @@ -100,7 +102,7 @@ extern const int32_t TYPE_BYTES[15]; #define TSDB_TIME_PRECISION_MICRO_STR "us" #define TSDB_TIME_PRECISION_NANO_STR "ns" -#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L)) +#define TSDB_TICK_PER_SECOND(precision) ((int64_t)((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))) #define T_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #define T_APPEND_MEMBER(dst, ptr, type, member) \ @@ -178,12 +180,16 @@ do { \ // this is the length of its string representation, including the terminator zero #define TSDB_ACCT_ID_LEN 11 -#define TSDB_MAX_COLUMNS 1024 +#define TSDB_MAX_COLUMNS 4096 #define TSDB_MIN_COLUMNS 2 //PRIMARY COLUMN(timestamp) + other columns #define TSDB_NODE_NAME_LEN 64 #define TSDB_TABLE_NAME_LEN 193 // it is a null-terminated string #define TSDB_DB_NAME_LEN 33 +#define TSDB_FUNC_NAME_LEN 65 +#define TSDB_FUNC_CODE_LEN (65535 - 512) +#define TSDB_FUNC_BUF_SIZE 512 +#define TSDB_TYPE_STR_MAX_LEN 32 #define TSDB_TABLE_FNAME_LEN (TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN + TSDB_TABLE_NAME_LEN) #define TSDB_COL_NAME_LEN 65 #define TSDB_MAX_SAVED_SQL_LEN TSDB_MAX_COLUMNS * 64 @@ -193,7 +199,13 @@ do { \ #define TSDB_APPNAME_LEN TSDB_UNI_LEN -#define TSDB_MAX_BYTES_PER_ROW 16384 + /** + * In some scenarios uint16_t (0~65535) is used to store the row len. + * - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains a 4-byte header. + * - Secondly, if all cols are VarDataT type except primary key, we need 4 bytes to store each offset, thus + * the final value is 65531-(4096-1)*4 = 49151. + */ +#define TSDB_MAX_BYTES_PER_ROW 49151 #define TSDB_MAX_TAGS_LEN 16384 #define TSDB_MAX_TAGS 128 #define TSDB_MAX_TAG_CONDITIONS 1024 @@ -321,8 +333,9 @@ do { \ #define TSDB_MAX_JOIN_TABLE_NUM 10 #define TSDB_MAX_UNION_CLAUSE 5 -#define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) -#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) +#define TSDB_MAX_FIELD_LEN 16384 +#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384 +#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384 #define PRIMARYKEY_TIMESTAMP_COL_INDEX 0 #define TSDB_MAX_RPC_THREADS 5 @@ -330,6 +343,10 @@ do { \ #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type #define TSDB_QUERY_TYPE_FREE_RESOURCE 0x01u // free qhandle at vnode +#define TSDB_UDF_TYPE_SCALAR 1 +#define TSDB_UDF_TYPE_AGGREGATE 2 + + /* * 1. ordinary sub query for select * from super_table * 2. 
all sqlobj generated by createSubqueryObj with this flag
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 431c9116ccec57d90a1dbe2405845f6c26a5fef6..2214078f5587799ff4daea4f708e920a95e97fcf 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -100,6 +100,9 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_TSC_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0217) //"Database not specified or available")
 #define TSDB_CODE_TSC_INVALID_TABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x0218) //"Table does not exist")
 #define TSDB_CODE_TSC_EXCEED_SQL_LIMIT TAOS_DEF_ERROR_CODE(0, 0x0219) //"SQL statement too long check maxSQLLength config")
+#define TSDB_CODE_TSC_FILE_EMPTY TAOS_DEF_ERROR_CODE(0, 0x021A) //"File is empty")
+#define TSDB_CODE_TSC_LINE_SYNTAX_ERROR TAOS_DEF_ERROR_CODE(0, 0x021B) //"Syntax error in Line")
+#define TSDB_CODE_TSC_NO_META_CACHED TAOS_DEF_ERROR_CODE(0, 0x021C) //"No table meta cached")
 // mnode
 #define TSDB_CODE_MND_MSG_NOT_PROCESSED TAOS_DEF_ERROR_CODE(0, 0x0300) //"Message not processed")
@@ -173,6 +176,14 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_MND_FIELD_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x036C) //"Field does not exist")
 #define TSDB_CODE_MND_INVALID_STABLE_NAME TAOS_DEF_ERROR_CODE(0, 0x036D) //"Super table does not exist")
 #define TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG TAOS_DEF_ERROR_CODE(0, 0x036E) //"Invalid create table message")
+#define TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES TAOS_DEF_ERROR_CODE(0, 0x036F) //"Exceed max row bytes")
+
+#define TSDB_CODE_MND_INVALID_FUNC_NAME TAOS_DEF_ERROR_CODE(0, 0x0370) //"Invalid func name")
+#define TSDB_CODE_MND_INVALID_FUNC_LEN TAOS_DEF_ERROR_CODE(0, 0x0371) //"Invalid func length")
+#define TSDB_CODE_MND_INVALID_FUNC_CODE TAOS_DEF_ERROR_CODE(0, 0x0372) //"Invalid func code")
+#define TSDB_CODE_MND_FUNC_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0373) //"Func already exists")
+#define TSDB_CODE_MND_INVALID_FUNC TAOS_DEF_ERROR_CODE(0, 0x0374) //"Invalid func")
+#define TSDB_CODE_MND_INVALID_FUNC_BUFSIZE TAOS_DEF_ERROR_CODE(0, 0x0375) //"Invalid func bufSize")
 #define TSDB_CODE_MND_DB_NOT_SELECTED TAOS_DEF_ERROR_CODE(0, 0x0380) //"Database not specified or available")
 #define TSDB_CODE_MND_DB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0381) //"Database already exists")
@@ -198,6 +209,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_DND_INVALID_MSG_LEN TAOS_DEF_ERROR_CODE(0, 0x0403) //"Invalid message length")
 #define TSDB_CODE_DND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0404) //"Action in progress")
 #define TSDB_CODE_DND_TOO_MANY_VNODES TAOS_DEF_ERROR_CODE(0, 0x0405) //"Too many vnode directories")
+#define TSDB_CODE_DND_EXITING TAOS_DEF_ERROR_CODE(0, 0x0406) //"Dnode is exiting"
 // vnode
 #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) //"Action in progress")
@@ -260,6 +272,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query")
 #define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached")
 #define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistency in replica")
+#define TSDB_CODE_QRY_SYS_ERROR TAOS_DEF_ERROR_CODE(0, 0x070D) //"System error")
 // grant
@@ -394,6 +407,8 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_HTTP_OP_VALUE_NULL TAOS_DEF_ERROR_CODE(0, 0x11A5) //"value not find")
 #define TSDB_CODE_HTTP_OP_VALUE_TYPE TAOS_DEF_ERROR_CODE(0, 0x11A6) //"value type should be boolean number or string")
+#define TSDB_CODE_HTTP_REQUEST_JSON_ERROR
TAOS_DEF_ERROR_CODE(0, 0x1F00) //"http request json error") + // odbc #define TSDB_CODE_ODBC_OOM TAOS_DEF_ERROR_CODE(0, 0x2100) //"out of memory") #define TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM TAOS_DEF_ERROR_CODE(0, 0x2101) //"convertion not a valid literal input") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 61e1e88c13fdced4c172b4c276067ff398b5f70b..06b80eea4f9536ca20bb9183c3fae1bafb96fafa 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -77,8 +77,10 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_USER, "drop-user" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DNODE, "create-dnode" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DNODE, "drop-dnode" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DB, "create-db" ) -TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" ) -TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_FUNCTION, "create-function" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_FUNCTION, "drop-function" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" ) @@ -96,7 +98,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_STREAM, "kill-stream" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_KILL_CONN, "kill-conn" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CONFIG_DNODE, "cm-config-dnode" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_HEARTBEAT, "heartbeat" ) -TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY8, "dummy8" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_RETRIEVE_FUNC, "retrieve-func" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY9, "dummy9" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY10, "dummy10" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY11, "dummy11" ) @@ -153,6 +155,7 @@ enum _mgmt_table { TSDB_MGMT_TABLE_STREAMTABLES, TSDB_MGMT_TABLE_CLUSTER, TSDB_MGMT_TABLE_TP, + TSDB_MGMT_TABLE_FUNCTION, TSDB_MGMT_TABLE_MAX, }; @@ -402,7 +405,7 @@ typedef struct SColIndex { int16_t colId; // column id int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag uint16_t flag; // denote if it is a tag or a normal column - char name[TSDB_COL_NAME_LEN]; // TODO remove it + char name[TSDB_COL_NAME_LEN + TSDB_DB_NAME_LEN + 1]; } SColIndex; typedef struct SColumnFilterInfo { @@ -507,6 +510,9 @@ typedef struct { int32_t prevResultLen; // previous result length int32_t numOfOperator; int32_t tableScanOperator;// table scan operator. 
-1 means no scan operator + int32_t udfNum; // number of udf function + int32_t udfContentOffset; + int32_t udfContentLen; SColumnInfo tableCols[]; } SQueryTableMsg; @@ -571,6 +577,41 @@ typedef struct { int8_t reserve[5]; } SCreateDbMsg, SAlterDbMsg; +typedef struct { + char name[TSDB_FUNC_NAME_LEN]; + char path[PATH_MAX]; + int32_t funcType; + uint8_t outputType; + int16_t outputLen; + int32_t bufSize; + int32_t codeLen; + char code[]; +} SCreateFuncMsg; + +typedef struct { + int32_t num; + char name[]; +} SRetrieveFuncMsg; + +typedef struct { + char name[TSDB_FUNC_NAME_LEN]; + int32_t funcType; + int8_t resType; + int16_t resBytes; + int32_t bufSize; + int32_t len; + char content[]; +} SFunctionInfoMsg; + +typedef struct { + int32_t num; + char content[]; +} SUdfFuncMsg; + +typedef struct { + char name[TSDB_FUNC_NAME_LEN]; +} SDropFuncMsg; + typedef struct { char db[TSDB_TABLE_FNAME_LEN]; uint8_t ignoreNotExists; @@ -710,8 +751,10 @@ typedef struct { } STableInfoMsg; typedef struct { + uint8_t metaClone; // create local clone of the cached table meta int32_t numOfVgroups; int32_t numOfTables; + int32_t numOfUdfs; char tableNames[]; } SMultiTableInfoMsg; @@ -762,7 +805,11 @@ typedef struct STableMetaMsg { typedef struct SMultiTableMeta { int32_t numOfTables; int32_t numOfVgroup; + int32_t numOfUdf; int32_t contLen; + uint8_t compressed; // denote if compressed or not + uint32_t rawLen; // size before compress + uint8_t metaClone; // make meta clone after retrieve meta from mnode char meta[]; } SMultiTableMeta; @@ -827,6 +874,10 @@ typedef struct { int64_t useconds; int64_t stime; uint64_t qId; + uint64_t sqlObjId; + int32_t pid; + char fqdn[TSDB_FQDN_LEN]; + int32_t numOfSub; } SQueryDesc; typedef struct { diff --git a/src/inc/tbn.h b/src/inc/tbn.h index b9f4e3c608a1ae3df3a4ea0dca32c7bf9d5820a9..b35f90eb153d02b4bb09f6f96a8dd09835626c97 100644 --- a/src/inc/tbn.h +++ b/src/inc/tbn.h @@ -31,6 +31,7 @@ void bnReset(); int32_t bnAllocVnodes(struct SVgObj *pVgroup); int32_t bnAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId); int32_t bnDropDnode(struct SDnodeObj *pDnode); +int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode); #ifdef __cplusplus } diff --git a/src/inc/tcq.h b/src/inc/tcq.h index 7549c3d498a02e948212c38b071701aa55adc2fc..27c043f960c2b77609b59f2188bfa0fa67c4b3b1 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -27,7 +27,7 @@ typedef struct { int32_t vgId; char user[TSDB_USER_LEN]; char pass[TSDB_KEY_LEN]; - char db[TSDB_DB_NAME_LEN]; + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; FCqWrite cqWrite; } SCqCfg; diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index f6047d57dc3e238f5eb20830b34c42034fac405d..e89fed628262eb78067c63e0f74347dc29123378 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -62,149 +62,154 @@ #define TK_SHOW 44 #define TK_DATABASES 45 #define TK_TOPICS 46 -#define TK_MNODES 47 -#define TK_DNODES 48 -#define TK_ACCOUNTS 49 -#define TK_USERS 50 -#define TK_MODULES 51 -#define TK_QUERIES 52 -#define TK_CONNECTIONS 53 -#define TK_STREAMS 54 -#define TK_VARIABLES 55 -#define TK_SCORES 56 -#define TK_GRANTS 57 -#define TK_VNODES 58 -#define TK_IPTOKEN 59 -#define TK_DOT 60 -#define TK_CREATE 61 -#define TK_TABLE 62 -#define TK_STABLE 63 -#define TK_DATABASE 64 -#define TK_TABLES 65 -#define TK_STABLES 66 -#define TK_VGROUPS 67 -#define TK_DROP 68 -#define TK_TOPIC 69 -#define TK_DNODE 70 -#define TK_USER 71 -#define TK_ACCOUNT 72 -#define TK_USE 73 -#define TK_DESCRIBE 74 -#define TK_ALTER 75 -#define TK_PASS 76 -#define 
TK_PRIVILEGE 77 -#define TK_LOCAL 78 -#define TK_COMPACT 79 -#define TK_LP 80 -#define TK_RP 81 -#define TK_IF 82 -#define TK_EXISTS 83 -#define TK_PPS 84 -#define TK_TSERIES 85 -#define TK_DBS 86 -#define TK_STORAGE 87 -#define TK_QTIME 88 -#define TK_CONNS 89 -#define TK_STATE 90 -#define TK_COMMA 91 -#define TK_KEEP 92 -#define TK_CACHE 93 -#define TK_REPLICA 94 -#define TK_QUORUM 95 -#define TK_DAYS 96 -#define TK_MINROWS 97 -#define TK_MAXROWS 98 -#define TK_BLOCKS 99 -#define TK_CTIME 100 -#define TK_WAL 101 -#define TK_FSYNC 102 -#define TK_COMP 103 -#define TK_PRECISION 104 -#define TK_UPDATE 105 -#define TK_CACHELAST 106 -#define TK_PARTITIONS 107 -#define TK_UNSIGNED 108 -#define TK_TAGS 109 -#define TK_USING 110 -#define TK_AS 111 -#define TK_NULL 112 -#define TK_SELECT 113 -#define TK_UNION 114 -#define TK_ALL 115 -#define TK_DISTINCT 116 -#define TK_FROM 117 -#define TK_VARIABLE 118 -#define TK_INTERVAL 119 -#define TK_SESSION 120 -#define TK_STATE_WINDOW 121 -#define TK_FILL 122 -#define TK_SLIDING 123 -#define TK_ORDER 124 -#define TK_BY 125 -#define TK_ASC 126 -#define TK_DESC 127 -#define TK_GROUP 128 -#define TK_HAVING 129 -#define TK_LIMIT 130 -#define TK_OFFSET 131 -#define TK_SLIMIT 132 -#define TK_SOFFSET 133 -#define TK_WHERE 134 -#define TK_NOW 135 -#define TK_RESET 136 -#define TK_QUERY 137 -#define TK_SYNCDB 138 -#define TK_ADD 139 -#define TK_COLUMN 140 -#define TK_MODIFY 141 -#define TK_TAG 142 -#define TK_CHANGE 143 -#define TK_SET 144 -#define TK_KILL 145 -#define TK_CONNECTION 146 -#define TK_STREAM 147 -#define TK_COLON 148 -#define TK_ABORT 149 -#define TK_AFTER 150 -#define TK_ATTACH 151 -#define TK_BEFORE 152 -#define TK_BEGIN 153 -#define TK_CASCADE 154 -#define TK_CLUSTER 155 -#define TK_CONFLICT 156 -#define TK_COPY 157 -#define TK_DEFERRED 158 -#define TK_DELIMITERS 159 -#define TK_DETACH 160 -#define TK_EACH 161 -#define TK_END 162 -#define TK_EXPLAIN 163 -#define TK_FAIL 164 -#define TK_FOR 165 -#define TK_IGNORE 166 -#define TK_IMMEDIATE 167 -#define TK_INITIALLY 168 -#define TK_INSTEAD 169 -#define TK_MATCH 170 -#define TK_KEY 171 -#define TK_OF 172 -#define TK_RAISE 173 -#define TK_REPLACE 174 -#define TK_RESTRICT 175 -#define TK_ROW 176 -#define TK_STATEMENT 177 -#define TK_TRIGGER 178 -#define TK_VIEW 179 -#define TK_SEMI 180 -#define TK_NONE 181 -#define TK_PREV 182 -#define TK_LINEAR 183 -#define TK_IMPORT 184 -#define TK_TBNAME 185 -#define TK_JOIN 186 -#define TK_INSERT 187 -#define TK_INTO 188 -#define TK_VALUES 189 +#define TK_FUNCTIONS 47 +#define TK_MNODES 48 +#define TK_DNODES 49 +#define TK_ACCOUNTS 50 +#define TK_USERS 51 +#define TK_MODULES 52 +#define TK_QUERIES 53 +#define TK_CONNECTIONS 54 +#define TK_STREAMS 55 +#define TK_VARIABLES 56 +#define TK_SCORES 57 +#define TK_GRANTS 58 +#define TK_VNODES 59 +#define TK_IPTOKEN 60 +#define TK_DOT 61 +#define TK_CREATE 62 +#define TK_TABLE 63 +#define TK_STABLE 64 +#define TK_DATABASE 65 +#define TK_TABLES 66 +#define TK_STABLES 67 +#define TK_VGROUPS 68 +#define TK_DROP 69 +#define TK_TOPIC 70 +#define TK_FUNCTION 71 +#define TK_DNODE 72 +#define TK_USER 73 +#define TK_ACCOUNT 74 +#define TK_USE 75 +#define TK_DESCRIBE 76 +#define TK_ALTER 77 +#define TK_PASS 78 +#define TK_PRIVILEGE 79 +#define TK_LOCAL 80 +#define TK_COMPACT 81 +#define TK_LP 82 +#define TK_RP 83 +#define TK_IF 84 +#define TK_EXISTS 85 +#define TK_AS 86 +#define TK_OUTPUTTYPE 87 +#define TK_AGGREGATE 88 +#define TK_BUFSIZE 89 +#define TK_PPS 90 +#define TK_TSERIES 91 +#define TK_DBS 92 +#define TK_STORAGE 93 +#define 
TK_QTIME 94 +#define TK_CONNS 95 +#define TK_STATE 96 +#define TK_COMMA 97 +#define TK_KEEP 98 +#define TK_CACHE 99 +#define TK_REPLICA 100 +#define TK_QUORUM 101 +#define TK_DAYS 102 +#define TK_MINROWS 103 +#define TK_MAXROWS 104 +#define TK_BLOCKS 105 +#define TK_CTIME 106 +#define TK_WAL 107 +#define TK_FSYNC 108 +#define TK_COMP 109 +#define TK_PRECISION 110 +#define TK_UPDATE 111 +#define TK_CACHELAST 112 +#define TK_PARTITIONS 113 +#define TK_UNSIGNED 114 +#define TK_TAGS 115 +#define TK_USING 116 +#define TK_NULL 117 +#define TK_NOW 118 +#define TK_SELECT 119 +#define TK_UNION 120 +#define TK_ALL 121 +#define TK_DISTINCT 122 +#define TK_FROM 123 +#define TK_VARIABLE 124 +#define TK_INTERVAL 125 +#define TK_SESSION 126 +#define TK_STATE_WINDOW 127 +#define TK_FILL 128 +#define TK_SLIDING 129 +#define TK_ORDER 130 +#define TK_BY 131 +#define TK_ASC 132 +#define TK_DESC 133 +#define TK_GROUP 134 +#define TK_HAVING 135 +#define TK_LIMIT 136 +#define TK_OFFSET 137 +#define TK_SLIMIT 138 +#define TK_SOFFSET 139 +#define TK_WHERE 140 +#define TK_RESET 141 +#define TK_QUERY 142 +#define TK_SYNCDB 143 +#define TK_ADD 144 +#define TK_COLUMN 145 +#define TK_MODIFY 146 +#define TK_TAG 147 +#define TK_CHANGE 148 +#define TK_SET 149 +#define TK_KILL 150 +#define TK_CONNECTION 151 +#define TK_STREAM 152 +#define TK_COLON 153 +#define TK_ABORT 154 +#define TK_AFTER 155 +#define TK_ATTACH 156 +#define TK_BEFORE 157 +#define TK_BEGIN 158 +#define TK_CASCADE 159 +#define TK_CLUSTER 160 +#define TK_CONFLICT 161 +#define TK_COPY 162 +#define TK_DEFERRED 163 +#define TK_DELIMITERS 164 +#define TK_DETACH 165 +#define TK_EACH 166 +#define TK_END 167 +#define TK_EXPLAIN 168 +#define TK_FAIL 169 +#define TK_FOR 170 +#define TK_IGNORE 171 +#define TK_IMMEDIATE 172 +#define TK_INITIALLY 173 +#define TK_INSTEAD 174 +#define TK_MATCH 175 +#define TK_KEY 176 +#define TK_OF 177 +#define TK_RAISE 178 +#define TK_REPLACE 179 +#define TK_RESTRICT 180 +#define TK_ROW 181 +#define TK_STATEMENT 182 +#define TK_TRIGGER 183 +#define TK_VIEW 184 +#define TK_SEMI 185 +#define TK_NONE 186 +#define TK_PREV 187 +#define TK_LINEAR 188 +#define TK_IMPORT 189 +#define TK_TBNAME 190 +#define TK_JOIN 191 +#define TK_INSERT 192 +#define TK_INTO 193 +#define TK_VALUES 194 #define TK_SPACE 300 diff --git a/src/inc/ttype.h b/src/inc/ttype.h index 9949f31c59f5805e381fbfbd8d5a65ba24eb40eb..fcd2651c701c6c7a31d276b807912f28111b4c4f 100644 --- a/src/inc/ttype.h +++ b/src/inc/ttype.h @@ -10,14 +10,28 @@ extern "C" { #include "taosdef.h" // ----------------- For variable data types such as TSDB_DATA_TYPE_BINARY and TSDB_DATA_TYPE_NCHAR -typedef int32_t VarDataOffsetT; -typedef int16_t VarDataLenT; +typedef int32_t VarDataOffsetT; +typedef int16_t VarDataLenT; // maxVarDataLen: 32767 +typedef uint16_t TDRowLenT; // not including overhead: 0 ~ 65535 +typedef uint32_t TDRowTLenT; // total length, including overhead typedef struct tstr { VarDataLenT len; char data[]; } tstr; +#pragma pack(push, 1) +typedef struct { + VarDataLenT len; + uint8_t data; +} SBinaryNullT; + +typedef struct { + VarDataLenT len; + uint32_t data; +} SNCharNullT; +#pragma pack(pop) + #define VARSTR_HEADER_SIZE sizeof(VarDataLenT) #define varDataLen(v) ((VarDataLenT *)(v))[0] @@ -28,6 +42,10 @@ typedef struct tstr { #define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT) (_len)) #define IS_VAR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_BINARY) || ((t) == TSDB_DATA_TYPE_NCHAR)) +#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0])) +#define varDataNetTLen(v) 
(sizeof(VarDataLenT) + varDataNetLen(v)) + + // this data type is internally used only in 'in' query to hold the values #define TSDB_DATA_TYPE_ARRAY (1000) @@ -120,6 +138,8 @@ typedef struct tstr { #define IS_VALID_USMALLINT(_t) ((_t) >= 0 && (_t) < UINT16_MAX) #define IS_VALID_UINT(_t) ((_t) >= 0 && (_t) < UINT32_MAX) #define IS_VALID_UBIGINT(_t) ((_t) >= 0 && (_t) < UINT64_MAX) +#define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= FLT_MAX) +#define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) static FORCE_INLINE bool isNull(const char *val, int32_t type) { switch (type) { @@ -176,7 +196,7 @@ bool isValidDataType(int32_t type); void setVardataNull(char* val, int32_t type); void setNull(char *val, int32_t type, int32_t bytes); void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems); -void *getNullValue(int32_t type); +const void *getNullValue(int32_t type); void assignVal(char *val, const char *src, int32_t len, int32_t type); void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf); diff --git a/src/inc/vnode.h b/src/inc/vnode.h index 9dae862344b90580d36fc9fbba67a27cf60edc50..b3291645c00be17283f7d078acb2d4c9a2629ece 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -49,7 +49,7 @@ typedef struct { SRpcMsg rpcMsg; SRspRet rspRet; char reserveForSync[24]; - SWalHead pHead; + SWalHead walHead; } SVWriteMsg; // vnodeStatus @@ -69,6 +69,7 @@ int32_t vnodeInitMgmt(); void vnodeCleanupMgmt(); void* vnodeAcquire(int32_t vgId); void vnodeRelease(void *pVnode); +void* vnodeAcquireNotClose(int32_t vgId); void* vnodeGetWal(void *pVnode); int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); void vnodeBuildStatusMsg(void *pStatus); diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt index 66e8cf73988ab25db7544b9a52215d2279630c63..fdf58d5ae1c21ebd8b2948114d9643d38dccae3e 100644 --- a/src/kit/CMakeLists.txt +++ b/src/kit/CMakeLists.txt @@ -1,6 +1,7 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) ADD_SUBDIRECTORY(shell) ADD_SUBDIRECTORY(taosdemo) ADD_SUBDIRECTORY(taosdump) +ADD_SUBDIRECTORY(taospack) diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt index d9049454352efbd9344eae3c776f10c8f37fe090..bf2bbca14d25aff3b3717c7b9785f1dc470a013a 100644 --- a/src/kit/shell/CMakeLists.txt +++ b/src/kit/shell/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) @@ -19,9 +19,9 @@ ELSE () ENDIF () IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(shell taos_static ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(shell taos_static lua ${LINK_JEMALLOC}) ELSE () - TARGET_LINK_LIBRARIES(shell taos ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(shell taos lua ${LINK_JEMALLOC}) ENDIF () SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos) diff --git a/src/kit/shell/inc/shellCommand.h b/src/kit/shell/inc/shellCommand.h index 3094bdb9ddb1ccd9debdbca88a34197385deb367..6e4d3e382e3d7e8c50405c07da8ed73725230434 100644 --- a/src/kit/shell/inc/shellCommand.h +++ b/src/kit/shell/inc/shellCommand.h @@ -35,6 +35,8 @@ struct Command { }; extern void backspaceChar(Command *cmd); +extern void clearLineBefore(Command *cmd); +extern void clearLineAfter(Command *cmd); extern void deleteChar(Command *cmd); extern void moveCursorLeft(Command *cmd); extern void moveCursorRight(Command *cmd); @@ -45,7 +47,7 @@ extern void updateBuffer(Command *cmd); extern int 
isReadyGo(Command *cmd); extern void resetCommand(Command *cmd, const char s[]); -int countPrefixOnes(char c); +int countPrefixOnes(unsigned char c); void clearScreen(int ecmd_pos, int cursor_pos); void printChar(char c, int times); void positionCursor(int step, int direction); diff --git a/src/kit/shell/src/shellCheck.c b/src/kit/shell/src/shellCheck.c index 4ff5dc36fc72bf2bfaa7ffea3f6a47c619c6235b..d78f1a6b996110f9cdce37e60dfbb85323a3a848 100644 --- a/src/kit/shell/src/shellCheck.c +++ b/src/kit/shell/src/shellCheck.c @@ -104,6 +104,8 @@ static void shellFreeTbnames() { static void *shellCheckThreadFp(void *arg) { ShellThreadObj *pThread = (ShellThreadObj *)arg; + setThreadName("shellCheckThrd"); + int32_t interval = tbNum / pThread->totalThreads + 1; int32_t start = pThread->threadIndex * interval; int32_t end = (pThread->threadIndex + 1) * interval; diff --git a/src/kit/shell/src/shellCommand.c b/src/kit/shell/src/shellCommand.c index 9173ab0efdae7e5900218b2ab256993df71b21dd..67e0c949890728268afcaf67804dd20e10231ba4 100644 --- a/src/kit/shell/src/shellCommand.c +++ b/src/kit/shell/src/shellCommand.c @@ -26,7 +26,7 @@ typedef struct { char widthOnScreen; } UTFCodeInfo; -int countPrefixOnes(char c) { +int countPrefixOnes(unsigned char c) { unsigned char mask = 127; mask = ~mask; int ret = 0; @@ -48,7 +48,7 @@ void getPrevCharSize(const char *str, int pos, int *size, int *width) { while (--pos >= 0) { *size += 1; - if (str[pos] > 0 || countPrefixOnes(str[pos]) > 1) break; + if (str[pos] > 0 || countPrefixOnes((unsigned char )str[pos]) > 1) break; } int rc = mbtowc(&wc, str + pos, MB_CUR_MAX); @@ -102,6 +102,28 @@ void backspaceChar(Command *cmd) { } } +void clearLineBefore(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + memmove(cmd->command, cmd->command + cmd->cursorOffset, + cmd->commandSize - cmd->cursorOffset); + cmd->commandSize -= cmd->cursorOffset; + cmd->cursorOffset = 0; + cmd->screenOffset = 0; + cmd->endOffset = cmd->commandSize; + showOnScreen(cmd); +} + +void clearLineAfter(Command *cmd) { + assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); + + clearScreen(cmd->endOffset + prompt_size, cmd->screenOffset + prompt_size); + cmd->commandSize -= cmd->endOffset - cmd->cursorOffset; + cmd->endOffset = cmd->cursorOffset; + showOnScreen(cmd); +} + void deleteChar(Command *cmd) { assert(cmd->cursorOffset <= cmd->commandSize && cmd->endOffset >= cmd->screenOffset); diff --git a/src/kit/shell/src/shellDarwin.c b/src/kit/shell/src/shellDarwin.c index 31ad7046e9176221e10c79b3f2367ea464529438..4dcd8b3d50628f626e681700e131ffb0a3b875e1 100644 --- a/src/kit/shell/src/shellDarwin.c +++ b/src/kit/shell/src/shellDarwin.c @@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) { updateBuffer(&cmd); } break; + case 11: // Ctrl + K; + clearLineAfter(&cmd); + break; case 12: // Ctrl + L; system("clear"); showOnScreen(&cmd); break; + case 21: // Ctrl + U + clearLineBefore(&cmd); + break; } } else if (c == '\033') { c = getchar(); @@ -336,6 +342,8 @@ void *shellLoopQuery(void *arg) { TAOS *con = (TAOS *)arg; + setThreadName("shellLoopQuery"); + pthread_cleanup_push(cleanup_handler, NULL); char *command = malloc(MAX_COMMAND_SIZE); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index d5fa0a9e09f5ac10d4d6a64c48d4639697c49165..58f4b7ff02b673288878aa44671ba2a544556cc5 100644 --- 
a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -37,6 +37,13 @@ char PROMPT_HEADER[] = "power> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 7; +#elif (_TD_TQ_ == true) +char CLIENT_VERSION[] = "Welcome to the TQ shell from %s, Client Version:%s\n" + "Copyright (c) 2020 by TQ, Inc. All rights reserved.\n\n"; +char PROMPT_HEADER[] = "tq> "; + +char CONTINUE_PROMPT[] = " -> "; +int prompt_size = 4; #else char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" "Copyright (c) 2020 by TAOS Data, Inc. All rights reserved.\n\n"; @@ -248,7 +255,7 @@ int32_t shellRunCommand(TAOS* con, char* command) { if (quote == c) { quote = 0; - } else if (c == '\'' || c == '"') { + } else if (quote == 0 && (c == '\'' || c == '"')) { quote = c; } diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index 5de50a3aaf70074da0592e628b932e49aaf89c48..222d69e854933095ec0aadaa8a67bf1c19954c3b 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -223,6 +223,8 @@ static void shellSourceFile(TAOS *con, char *fptr) { void* shellImportThreadFp(void *arg) { ShellThreadObj *pThread = (ShellThreadObj*)arg; + setThreadName("shellImportThrd"); + for (int f = 0; f < shellSQLFileNum; ++f) { if (f % pThread->totalThreads == pThread->threadIndex) { char *SQLFileName = shellSQLFiles[f]; diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 4eead252fd4c47893ba966c1686d375fd7b9b6dc..dc74f6fcaa152c547d734ed4e186b45b94ce8de5 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -238,10 +238,16 @@ int32_t shellReadCommand(TAOS *con, char *command) { updateBuffer(&cmd); } break; + case 11: // Ctrl + K; + clearLineAfter(&cmd); + break; case 12: // Ctrl + L; system("clear"); showOnScreen(&cmd); break; + case 21: // Ctrl + U; + clearLineBefore(&cmd); + break; } } else if (c == '\033') { c = (char)getchar(); @@ -336,6 +342,8 @@ void *shellLoopQuery(void *arg) { TAOS *con = (TAOS *)arg; + setThreadName("shellLoopQuery"); + pthread_cleanup_push(cleanup_handler, NULL); char *command = malloc(MAX_COMMAND_SIZE); diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 4c7e550760cecb7c045cb8c94fc431cb5f91812b..0c70386061b99baaf2f9448ddadbb250685f23d4 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -26,6 +26,8 @@ void shellQueryInterruptHandler(int32_t signum, void *sigInfo, void *context) { } void *cancelHandler(void *arg) { + setThreadName("cancelHandler"); + while(1) { if (tsem_wait(&cancelSem) != 0) { taosMsleep(10); diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index 091eecfe270a20987a48f7223f57d2400169ac6d..2034093ad5841c267b722930681127d745d27153 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) @@ -67,7 +67,7 @@ IF (TD_LINUX) ADD_EXECUTABLE(taosdemo ${SRC}) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson ${LINK_JEMALLOC}) + TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua ${LINK_JEMALLOC}) ELSE () TARGET_LINK_LIBRARIES(taosdemo taos cJson ${LINK_JEMALLOC}) ENDIF () @@ -76,9 +76,9 @@ ELSEIF (TD_WINDOWS) ADD_EXECUTABLE(taosdemo ${SRC}) SET_SOURCE_FILES_PROPERTIES(./taosdemo.c PROPERTIES COMPILE_FLAGS -w) IF (TD_SOMODE_STATIC) - 
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson) + TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson) + TARGET_LINK_LIBRARIES(taosdemo taos cJson lua) ENDIF () ELSEIF (TD_DARWIN) # missing a few dependencies, such as @@ -86,9 +86,9 @@ ELSEIF (TD_DARWIN) ADD_EXECUTABLE(taosdemo ${SRC}) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdemo taos_static cJson) + TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua) ELSE () - TARGET_LINK_LIBRARIES(taosdemo taos cJson) + TARGET_LINK_LIBRARIES(taosdemo taos cJson lua) ENDIF () ENDIF () diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 865224d2c3f513edcd8308731baf096cc697ea83..c4dec6a231e8b09bd837d0a8e3e57e6f5cb04274 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -16,7 +16,7 @@ /* when in some thread query return error, thread don't exit, but return, otherwise coredump in other thread. -*/ + */ #include #include @@ -24,24 +24,24 @@ #define CURL_STATICLIB #ifdef LINUX - #include - #include - #ifndef _ALPINE - #include - #endif - #include - #include - #include - #include - #include - #include - #include - #include - #include - #include +#include +#include +#ifndef _ALPINE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #else - #include - #include +#include +#include #endif #include @@ -54,77 +54,80 @@ #include "tutil.h" #define STMT_IFACE_ENABLED 1 +#define NANO_SECOND_ENABLED 1 +#define SET_THREADNAME_ENABLED 1 + +#if SET_THREADNAME_ENABLED == 0 +#define setThreadName(name) +#endif #define REQ_EXTRA_BUF_LEN 1024 #define RESP_BUF_LEN 4096 extern char configDir[]; -#define INSERT_JSON_NAME "insert.json" -#define QUERY_JSON_NAME "query.json" -#define SUBSCRIBE_JSON_NAME "subscribe.json" - #define STR_INSERT_INTO "INSERT INTO " -enum TEST_MODE { - INSERT_TEST, // 0 - QUERY_TEST, // 1 - SUBSCRIBE_TEST, // 2 - INVAID_TEST -}; - #define MAX_RECORDS_PER_REQ 32766 -#define HEAD_BUFF_LEN 1024*24 // 16*1024 + (192+32)*2 + insert into .. +#define HEAD_BUFF_LEN TSDB_MAX_COLUMNS*24 // 16*MAX_COLUMNS + (192+32)*2 + insert into .. + +#define BUFFER_SIZE TSDB_MAX_ALLOWED_SQL_LEN +#define COND_BUF_LEN (BUFFER_SIZE - 30) +#define COL_BUFFER_LEN ((TSDB_COL_NAME_LEN + 15) * TSDB_MAX_COLUMNS) -#define MAX_SQL_SIZE 65536 -#define BUFFER_SIZE (65536*2) -#define COND_BUF_LEN BUFFER_SIZE - 30 #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 -#define MAX_DB_NAME_SIZE 64 -#define MAX_HOSTNAME_SIZE 64 +#define MAX_HOSTNAME_SIZE 253 // https://man7.org/linux/man-pages/man7/hostname.7.html #define MAX_TB_NAME_SIZE 64 -#define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space -#define MAX_NUM_DATATYPE 10 +#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space #define OPT_ABORT 1 /* –abort */ -#define STRING_LEN 60000 #define MAX_PREPARED_RAND 1000000 -#define MAX_FILE_NAME_LEN 256 +#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. 
+ +#define MAX_SAMPLES_ONCE_FROM_FILE 10000 +#define MAX_NUM_COLUMNS (TSDB_MAX_COLUMNS - 1) // exclude first column timestamp -#define MAX_SAMPLES_ONCE_FROM_FILE 10000 -#define MAX_NUM_DATATYPE 10 +#define MAX_DB_COUNT 8 +#define MAX_SUPER_TABLE_COUNT 200 -#define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 200 -#define MAX_COLUMN_COUNT 1024 -#define MAX_TAG_COUNT 128 +#define MAX_QUERY_SQL_COUNT 100 +#define MAX_QUERY_SQL_LENGTH BUFFER_SIZE -#define MAX_QUERY_SQL_COUNT 100 -#define MAX_QUERY_SQL_LENGTH 1024 +#define MAX_DATABASE_COUNT 256 +#define INPUT_BUF_LEN 256 -#define MAX_DATABASE_COUNT 256 -#define INPUT_BUF_LEN 256 +#define TBNAME_PREFIX_LEN (TSDB_TABLE_NAME_LEN - 20) // 20 characters reserved for seq +#define SMALL_BUFF_LEN 8 +#define DATATYPE_BUFF_LEN (SMALL_BUFF_LEN*3) +#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16) #define DEFAULT_TIMESTAMP_STEP 1 +enum TEST_MODE { + INSERT_TEST, // 0 + QUERY_TEST, // 1 + SUBSCRIBE_TEST, // 2 + INVAID_TEST +}; + typedef enum CREATE_SUB_TALBE_MOD_EN { - PRE_CREATE_SUBTBL, - AUTO_CREATE_SUBTBL, - NO_CREATE_SUBTBL + PRE_CREATE_SUBTBL, + AUTO_CREATE_SUBTBL, + NO_CREATE_SUBTBL } CREATE_SUB_TALBE_MOD_EN; typedef enum TALBE_EXISTS_EN { - TBL_NO_EXISTS, - TBL_ALREADY_EXISTS, - TBL_EXISTS_BUTT + TBL_NO_EXISTS, + TBL_ALREADY_EXISTS, + TBL_EXISTS_BUTT } TALBE_EXISTS_EN; enum enumSYNC_MODE { - SYNC_MODE, - ASYNC_MODE, - MODE_BUT + SYNC_MODE, + ASYNC_MODE, + MODE_BUT }; enum enum_TAOS_INTERFACE { @@ -147,335 +150,329 @@ typedef enum enum_PROGRESSIVE_OR_INTERLACE { } PROG_OR_INTERLACE_MODE; typedef enum enumQUERY_TYPE { - NO_INSERT_TYPE, - INSERT_TYPE, - QUERY_TYPE_BUT + NO_INSERT_TYPE, + INSERT_TYPE, + QUERY_TYPE_BUT } QUERY_TYPE; enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB }; // -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- enum _show_stables_index { - TSDB_SHOW_STABLES_NAME_INDEX, - TSDB_SHOW_STABLES_CREATED_TIME_INDEX, - TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, - TSDB_SHOW_STABLES_TID_INDEX, - TSDB_SHOW_STABLES_VGID_INDEX, - TSDB_MAX_SHOW_STABLES + TSDB_SHOW_STABLES_NAME_INDEX, + TSDB_SHOW_STABLES_CREATED_TIME_INDEX, + TSDB_SHOW_STABLES_COLUMNS_INDEX, + TSDB_SHOW_STABLES_METRIC_INDEX, + TSDB_SHOW_STABLES_UID_INDEX, + TSDB_SHOW_STABLES_TID_INDEX, + TSDB_SHOW_STABLES_VGID_INDEX, + TSDB_MAX_SHOW_STABLES }; enum _describe_table_index { 
- TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC }; -typedef struct { - char field[TSDB_COL_NAME_LEN + 1]; - char type[16]; - int length; - char note[128]; -} SColDes; - /* Used by main to communicate with parse_opt. */ static char *g_dupstr = NULL; typedef struct SArguments_S { - char * metaFile; - uint32_t test_mode; - char * host; - uint16_t port; - uint16_t iface; - char * user; - char * password; - char * database; - int replica; - char * tb_prefix; - char * sqlFile; - bool use_metric; - bool drop_database; - bool insert_only; - bool answer_yes; - bool debug_print; - bool verbose_print; - bool performance_print; - char * output_file; - bool async_mode; - char * datatype[MAX_NUM_DATATYPE + 1]; - uint32_t len_of_binary; - uint32_t num_of_CPR; - uint32_t num_of_threads; - uint64_t insert_interval; - int64_t query_times; - uint32_t interlace_rows; - uint32_t num_of_RPR; // num_of_records_per_req - uint64_t max_sql_len; - int64_t num_of_tables; - int64_t num_of_DPT; - int abort; - uint32_t disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms or us by database precision - uint32_t method_of_delete; - char ** arg_list; - uint64_t totalInsertRows; - uint64_t totalAffectedRows; + char * metaFile; + uint32_t test_mode; + char * host; + uint16_t port; + uint16_t iface; + char * user; + char * password; + char * database; + int replica; + char * tb_prefix; + char * sqlFile; + bool use_metric; + bool drop_database; + bool insert_only; + bool answer_yes; + bool debug_print; + bool verbose_print; + bool performance_print; + char * output_file; + bool async_mode; + char * datatype[MAX_NUM_COLUMNS + 1]; + uint32_t len_of_binary; + uint32_t num_of_CPR; + uint32_t num_of_threads; + uint64_t insert_interval; + uint64_t timestamp_step; + int64_t query_times; + uint32_t interlace_rows; + uint32_t num_of_RPR; // num_of_records_per_req + uint64_t max_sql_len; + int64_t num_of_tables; + int64_t num_of_DPT; + int abort; + uint32_t disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms, us or ns. 
accordig to database precision + uint32_t method_of_delete; + char ** arg_list; + uint64_t totalInsertRows; + uint64_t totalAffectedRows; + bool demo_mode; // use default column name and semi-random data } SArguments; typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN + 1]; - char dataType[MAX_TB_NAME_SIZE]; - uint32_t dataLen; - char note[128]; + char field[TSDB_COL_NAME_LEN]; + char dataType[DATATYPE_BUFF_LEN]; + uint32_t dataLen; + char note[NOTE_BUFF_LEN]; } StrColumn; typedef struct SSuperTable_S { - char sTblName[MAX_TB_NAME_SIZE+1]; - char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample - char childTblPrefix[MAX_TB_NAME_SIZE]; - char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest - uint16_t childTblExists; - int64_t childTblCount; - uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql - uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table - uint16_t iface; // 0: taosc, 1: rest, 2: stmt - int64_t childTblLimit; - uint64_t childTblOffset; - -// int multiThreadWriteOneTbl; // 0: no, 1: yes - uint32_t interlaceRows; // - int disorderRatio; // 0: no disorder, >0: x% - int disorderRange; // ms or us by database precision - uint64_t maxSqlLen; // - - uint64_t insertInterval; // insert interval, will override global insert interval - int64_t insertRows; - int64_t timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; - char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN+1]; - char tagsFile[MAX_FILE_NAME_LEN+1]; - - uint32_t columnCount; - StrColumn columns[MAX_COLUMN_COUNT]; - uint32_t tagCount; - StrColumn tags[MAX_TAG_COUNT]; - - char* childTblName; - char* colsOfCreateChildTable; - uint64_t lenOfOneRow; - uint64_t lenOfTagOfOneRow; - - char* sampleDataBuf; - //int sampleRowCount; - //int sampleUsePos; - - uint32_t tagSource; // 0: rand, 1: tag sample - char* tagDataBuf; - uint32_t tagSampleCount; - uint32_t tagUsePos; - - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; + char sTblName[TSDB_TABLE_NAME_LEN]; + char dataSource[SMALL_BUFF_LEN]; // rand_gen or sample + char childTblPrefix[TBNAME_PREFIX_LEN]; + uint16_t childTblExists; + int64_t childTblCount; + uint64_t batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql + uint8_t autoCreateTable; // 0: create sub table, 1: auto create sub table + uint16_t iface; // 0: taosc, 1: rest, 2: stmt + int64_t childTblLimit; + uint64_t childTblOffset; + + // int multiThreadWriteOneTbl; // 0: no, 1: yes + uint32_t interlaceRows; // + int disorderRatio; // 0: no disorder, >0: x% + int disorderRange; // ms, us or ns. 
according to database precision + uint64_t maxSqlLen; // + + uint64_t insertInterval; // insert interval, will override global insert interval + int64_t insertRows; + int64_t timeStampStep; + char startTimestamp[MAX_TB_NAME_SIZE]; + char sampleFormat[SMALL_BUFF_LEN]; // csv, json + char sampleFile[MAX_FILE_NAME_LEN]; + char tagsFile[MAX_FILE_NAME_LEN]; + + uint32_t columnCount; + StrColumn columns[TSDB_MAX_COLUMNS]; + uint32_t tagCount; + StrColumn tags[TSDB_MAX_TAGS]; + + char* childTblName; + char* colsOfCreateChildTable; + uint64_t lenOfOneRow; + uint64_t lenOfTagOfOneRow; + + char* sampleDataBuf; + //int sampleRowCount; + //int sampleUsePos; + + uint32_t tagSource; // 0: rand, 1: tag sample + char* tagDataBuf; + uint32_t tagSampleCount; + uint32_t tagUsePos; + + // statistics + uint64_t totalInsertRows; + uint64_t totalAffectedRows; } SSuperTable; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; - char create_time[32]; - int64_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[32]; - int32_t cache; //MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[8]; // time resolution - int8_t update; - char status[16]; + char name[TSDB_DB_NAME_LEN]; + char create_time[32]; + int64_t ntables; + int32_t vgroups; + int16_t replica; + int16_t quorum; + int16_t days; + char keeplist[64]; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + int8_t cachelast; + char precision[SMALL_BUFF_LEN]; // time resolution + int8_t update; + char status[16]; } SDbInfo; typedef struct SDbCfg_S { -// int maxtablesPerVnode; - uint32_t minRows; // 0 means default - uint32_t maxRows; // 0 means default - int comp; - int walLevel; - int cacheLast; - int fsync; - int replica; - int update; - int keep; - int days; - int cache; - int blocks; - int quorum; - char precision[MAX_TB_NAME_SIZE]; + // int maxtablesPerVnode; + uint32_t minRows; // 0 means default + uint32_t maxRows; // 0 means default + int comp; + int walLevel; + int cacheLast; + int fsync; + int replica; + int update; + int keep; + int days; + int cache; + int blocks; + int quorum; + char precision[SMALL_BUFF_LEN]; } SDbCfg; typedef struct SDataBase_S { - char dbName[MAX_DB_NAME_SIZE]; - bool drop; // 0: use exists, 1: if exists, drop then new create - SDbCfg dbCfg; - uint64_t superTblCount; - SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; + char dbName[TSDB_DB_NAME_LEN]; + bool drop; // 0: use exists, 1: if exists, drop then new create + SDbCfg dbCfg; + uint64_t superTblCount; + SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; } SDataBase; typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN+1]; - char host[MAX_HOSTNAME_SIZE]; - struct sockaddr_in serv_addr; - - uint16_t port; - char user[MAX_USERNAME_SIZE]; - char password[MAX_PASSWORD_SIZE]; - char resultFile[MAX_FILE_NAME_LEN+1]; - bool use_metric; - bool insert_only; - bool do_aggreFunc; - bool asyncMode; - - uint32_t threadCount; - uint32_t threadCountByCreateTbl; - uint32_t dbCount; - SDataBase db[MAX_DB_COUNT]; - - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; + char cfgDir[MAX_FILE_NAME_LEN]; + char host[MAX_HOSTNAME_SIZE]; + struct sockaddr_in serv_addr; + + uint16_t port; + char user[MAX_USERNAME_SIZE]; + char password[MAX_PASSWORD_SIZE]; + char resultFile[MAX_FILE_NAME_LEN]; + bool use_metric; + bool insert_only; + bool do_aggreFunc; + bool asyncMode; + + 
uint32_t threadCount; + uint32_t threadCountByCreateTbl; + uint32_t dbCount; + SDataBase db[MAX_DB_COUNT]; + + // statistics + uint64_t totalInsertRows; + uint64_t totalAffectedRows; } SDbs; typedef struct SpecifiedQueryInfo_S { - uint64_t queryInterval; // 0: unlimit > 0 loop/s - uint32_t concurrent; - int sqlCount; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - uint64_t queryTimes; - bool subscribeRestart; - int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; - int resubAfterConsume[MAX_QUERY_SQL_COUNT]; - int endAfterConsume[MAX_QUERY_SQL_COUNT]; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - char topic[MAX_QUERY_SQL_COUNT][32]; - int consumed[MAX_QUERY_SQL_COUNT]; - TAOS_RES* res[MAX_QUERY_SQL_COUNT]; - uint64_t totalQueried; + uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint32_t concurrent; + int sqlCount; + uint32_t asyncMode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + uint64_t queryTimes; + bool subscribeRestart; + int subscribeKeepProgress; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + int resubAfterConsume[MAX_QUERY_SQL_COUNT]; + int endAfterConsume[MAX_QUERY_SQL_COUNT]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + char topic[MAX_QUERY_SQL_COUNT][32]; + int consumed[MAX_QUERY_SQL_COUNT]; + TAOS_RES* res[MAX_QUERY_SQL_COUNT]; + uint64_t totalQueried; } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { - char sTblName[MAX_TB_NAME_SIZE+1]; - uint64_t queryInterval; // 0: unlimit > 0 loop/s - uint32_t threadCnt; - uint32_t asyncMode; // 0: sync, 1: async - uint64_t subscribeInterval; // ms - bool subscribeRestart; - int subscribeKeepProgress; - uint64_t queryTimes; - int64_t childTblCount; - char childTblPrefix[MAX_TB_NAME_SIZE]; - int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; - int resubAfterConsume; - int endAfterConsume; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - - char* childTblName; - uint64_t totalQueried; + char sTblName[TSDB_TABLE_NAME_LEN]; + uint64_t queryInterval; // 0: unlimit > 0 loop/s + uint32_t threadCnt; + uint32_t asyncMode; // 0: sync, 1: async + uint64_t subscribeInterval; // ms + bool subscribeRestart; + int subscribeKeepProgress; + uint64_t queryTimes; + int64_t childTblCount; + char childTblPrefix[TBNAME_PREFIX_LEN]; // 20 characters reserved for seq + int sqlCount; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; + int resubAfterConsume; + int endAfterConsume; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; + + char* childTblName; + uint64_t totalQueried; } SuperQueryInfo; typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN+1]; - char host[MAX_HOSTNAME_SIZE]; - uint16_t port; - struct sockaddr_in serv_addr; - char user[MAX_USERNAME_SIZE]; - char password[MAX_PASSWORD_SIZE]; - char dbName[MAX_DB_NAME_SIZE+1]; - char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest - - SpecifiedQueryInfo specifiedQueryInfo; - SuperQueryInfo superQueryInfo; - uint64_t totalQueried; + char cfgDir[MAX_FILE_NAME_LEN]; + char host[MAX_HOSTNAME_SIZE]; + uint16_t port; + struct sockaddr_in serv_addr; + char user[MAX_USERNAME_SIZE]; + char password[MAX_PASSWORD_SIZE]; + char dbName[TSDB_DB_NAME_LEN]; + char queryMode[SMALL_BUFF_LEN]; // taosc, rest + + SpecifiedQueryInfo specifiedQueryInfo; + SuperQueryInfo superQueryInfo; + uint64_t 
totalQueried; } SQueryMetaInfo; typedef struct SThreadInfo_S { - TAOS * taos; - TAOS_STMT *stmt; - int threadID; - char db_name[MAX_DB_NAME_SIZE+1]; - uint32_t time_precision; - char filePath[4096]; - FILE *fp; - char tb_prefix[MAX_TB_NAME_SIZE]; - uint64_t start_table_from; - uint64_t end_table_to; - int64_t ntables; - uint64_t data_of_rate; - int64_t start_time; - char* cols; - bool use_metric; - SSuperTable* superTblInfo; - char *buffer; // sql cmd buffer - - // for async insert - tsem_t lock_sem; - int64_t counter; - uint64_t st; - uint64_t et; - uint64_t lastTs; - - // sample data - int64_t samplePos; - // statistics - uint64_t totalInsertRows; - uint64_t totalAffectedRows; - - // insert delay statistics - uint64_t cntDelay; - uint64_t totalDelay; - uint64_t avgDelay; - uint64_t maxDelay; - uint64_t minDelay; - - // seq of query or subscribe - uint64_t querySeq; // sequence number of sql command - TAOS_SUB* tsub; + TAOS * taos; + TAOS_STMT *stmt; + int threadID; + char db_name[TSDB_DB_NAME_LEN]; + uint32_t time_precision; + char filePath[4096]; + FILE *fp; + char tb_prefix[TSDB_TABLE_NAME_LEN]; + uint64_t start_table_from; + uint64_t end_table_to; + int64_t ntables; + uint64_t data_of_rate; + int64_t start_time; + char* cols; + bool use_metric; + SSuperTable* superTblInfo; + char *buffer; // sql cmd buffer + + // for async insert + tsem_t lock_sem; + int64_t counter; + uint64_t st; + uint64_t et; + uint64_t lastTs; + + // sample data + int64_t samplePos; + // statistics + uint64_t totalInsertRows; + uint64_t totalAffectedRows; + + // insert delay statistics + uint64_t cntDelay; + uint64_t totalDelay; + uint64_t avgDelay; + uint64_t maxDelay; + uint64_t minDelay; + + // seq of query or subscribe + uint64_t querySeq; // sequence number of sql command + TAOS_SUB* tsub; } threadInfo; @@ -490,42 +487,42 @@ typedef unsigned __int32 uint32_t; #pragma comment ( lib, "ws2_32.lib" ) // Some old MinGW/CYGWIN distributions don't define this: #ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING - #define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 +#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004 #endif // ENABLE_VIRTUAL_TERMINAL_PROCESSING static HANDLE g_stdoutHandle; static DWORD g_consoleMode; static void setupForAnsiEscape(void) { - DWORD mode = 0; - g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); + DWORD mode = 0; + g_stdoutHandle = GetStdHandle(STD_OUTPUT_HANDLE); - if(g_stdoutHandle == INVALID_HANDLE_VALUE) { - exit(GetLastError()); - } + if(g_stdoutHandle == INVALID_HANDLE_VALUE) { + exit(GetLastError()); + } - if(!GetConsoleMode(g_stdoutHandle, &mode)) { - exit(GetLastError()); - } + if(!GetConsoleMode(g_stdoutHandle, &mode)) { + exit(GetLastError()); + } - g_consoleMode = mode; + g_consoleMode = mode; - // Enable ANSI escape codes - mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + // Enable ANSI escape codes + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; - if(!SetConsoleMode(g_stdoutHandle, mode)) { - exit(GetLastError()); - } + if(!SetConsoleMode(g_stdoutHandle, mode)) { + exit(GetLastError()); + } } static void resetAfterAnsiEscape(void) { - // Reset colors - printf("\x1b[0m"); + // Reset colors + printf("\x1b[0m"); - // Reset console mode - if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { - exit(GetLastError()); - } + // Reset console mode + if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { + exit(GetLastError()); + } } static int taosRandom() @@ -539,15 +536,15 @@ static int taosRandom() static void setupForAnsiEscape(void) {} static void resetAfterAnsiEscape(void) { - // Reset colors - 
printf("\x1b[0m"); + // Reset colors + printf("\x1b[0m"); } #include static int taosRandom() { - return rand(); + return rand(); } #endif // ifdef Windows @@ -560,6 +557,8 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, char* sqlstr, threadInfo *pThreadInfo); static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, int disorderRatio, int disorderRange); +static bool getInfoFromJsonFile(char* file); +static void init_rand_data(); /* ************ Global variables ************ */ @@ -570,52 +569,59 @@ double randdouble[MAX_PREPARED_RAND]; char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", "max(col0)", "min(col0)", "first(col0)", "last(col0)"}; +#define DEFAULT_DATATYPE_NUM 3 + SArguments g_args = { - NULL, // metaFile - 0, // test_mode - "127.0.0.1", // host - 6030, // port - TAOSC_IFACE, // iface - "root", // user - #ifdef _TD_POWER_ - "powerdb", // password - #else - "taosdata", // password - #endif - "test", // database - 1, // replica - "t", // tb_prefix - NULL, // sqlFile - true, // use_metric - true, // drop_database - true, // insert_only - false, // debug_print - false, // verbose_print - false, // performance statistic print - false, // answer_yes; - "./output.txt", // output_file - 0, // mode : sync or async - { - "INT", // datatype - "INT", // datatype - "INT", // datatype - "INT", // datatype - }, - 16, // len_of_binary - 4, // num_of_CPR - 10, // num_of_connections/thread - 0, // insert_interval - 1, // query_times - 0, // interlace_rows; - 30000, // num_of_RPR - (1024*1024), // max_sql_len - 10000, // num_of_tables - 10000, // num_of_DPT - 0, // abort - 0, // disorderRatio - 1000, // disorderRange - 1, // method_of_delete - NULL // arg_list + NULL, // metaFile + 0, // test_mode + "127.0.0.1", // host + 6030, // port + INTERFACE_BUT, // iface + "root", // user +#ifdef _TD_POWER_ + "powerdb", // password +#elif (_TD_TQ_ == true) + "tqueue", // password +#else + "taosdata", // password +#endif + "test", // database + 1, // replica + "d", // tb_prefix + NULL, // sqlFile + true, // use_metric + true, // drop_database + true, // insert_only + false, // debug_print + false, // verbose_print + false, // performance statistic print + false, // answer_yes; + "./output.txt", // output_file + 0, // mode : sync or async + { + "FLOAT", // datatype + "INT", // datatype + "FLOAT", // datatype. DEFAULT_DATATYPE_NUM is 3 + }, + 16, // len_of_binary + 4, // num_of_CPR + 10, // num_of_connections/thread + 0, // insert_interval + DEFAULT_TIMESTAMP_STEP, // timestamp_step + 1, // query_times + 0, // interlace_rows; + 30000, // num_of_RPR + (1024*1024), // max_sql_len + 10000, // num_of_tables + 10000, // num_of_DPT + 0, // abort + 0, // disorderRatio + 1000, // disorderRange + 1, // method_of_delete + NULL, // arg_list + 0, // totalInsertRows; + 0, // totalAffectedRows; + true, // demo_mode; }; @@ -631,7 +637,7 @@ static FILE * g_fpOfInsertResult = NULL; #define debugPrint(fmt, ...) \ do { if (g_args.debug_print || g_args.verbose_print) \ - fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) + fprintf(stderr, "DEBG: "fmt, __VA_ARGS__); } while(0) #define verbosePrint(fmt, ...) \ do { if (g_args.verbose_print) \ @@ -639,10 +645,13 @@ static FILE * g_fpOfInsertResult = NULL; #define performancePrint(fmt, ...) \ do { if (g_args.performance_print) \ - fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) + fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) 
\ - do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0) + do { fprintf(stderr, " \033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, " \033[0m"); } while(0) + +// for strncpy buffer overflow +#define min(a, b) (((a) < (b)) ? (a) : (b)) /////////////////////////////////////////////////// @@ -676,639 +685,690 @@ static void printVersion() { } static void printHelp() { - char indent[10] = " "; - printf("%s%s%s%s\n", indent, "-f", indent, - "The meta file to the execution procedure. Default is './meta.json'."); - printf("%s%s%s%s\n", indent, "-u", indent, - "The TDengine user name to use when connecting to the server. Default is 'root'."); + char indent[10] = " "; + printf("%s%s%s%s\n", indent, "-f", indent, + "The meta file to the execution procedure. Default is './meta.json'."); + printf("%s%s%s%s\n", indent, "-u", indent, + "The TDengine user name to use when connecting to the server. Default is 'root'."); #ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-P", indent, - "The password to use when connecting to the server. Default is 'powerdb'."); - printf("%s%s%s%s\n", indent, "-c", indent, - "Configuration directory. Default is '/etc/power/'."); + printf("%s%s%s%s\n", indent, "-P", indent, + "The password to use when connecting to the server. Default is 'powerdb'."); + printf("%s%s%s%s\n", indent, "-c", indent, + "Configuration directory. Default is '/etc/power/'."); +#elif (_TD_TQ_ == true) + printf("%s%s%s%s\n", indent, "-P", indent, + "The password to use when connecting to the server. Default is 'tqueue'."); + printf("%s%s%s%s\n", indent, "-c", indent, + "Configuration directory. Default is '/etc/tq/'."); #else - printf("%s%s%s%s\n", indent, "-P", indent, - "The password to use when connecting to the server. Default is 'taosdata'."); - printf("%s%s%s%s\n", indent, "-c", indent, - "Configuration directory. Default is '/etc/taos/'."); + printf("%s%s%s%s\n", indent, "-P", indent, + "The password to use when connecting to the server. Default is 'taosdata'."); + printf("%s%s%s%s\n", indent, "-c", indent, + "Configuration directory. Default is '/etc/taos/'."); #endif - printf("%s%s%s%s\n", indent, "-h", indent, - "The host to connect to TDengine. Default is localhost."); - printf("%s%s%s%s\n", indent, "-p", indent, - "The TCP/IP port number to use for the connection. Default is 0."); - printf("%s%s%s%s\n", indent, "-I", indent, + printf("%s%s%s%s\n", indent, "-h", indent, + "The host to connect to TDengine. Default is localhost."); + printf("%s%s%s%s\n", indent, "-p", indent, + "The TCP/IP port number to use for the connection. Default is 0."); + printf("%s%s%s%s\n", indent, "-I", indent, #if STMT_IFACE_ENABLED == 1 - "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); + "The interface (taosc, rest, and stmt) taosdemo uses. Default is 'taosc'."); #else - "The interface (taosc, rest) taosdemo uses. Default is 'taosc'."); + "The interface (taosc, rest) taosdemo uses. Default is 'taosc'."); #endif - printf("%s%s%s%s\n", indent, "-d", indent, - "Destination database. Default is 'test'."); - printf("%s%s%s%s\n", indent, "-a", indent, - "Set the replica parameters of the database, Default 1, min: 1, max: 3."); - printf("%s%s%s%s\n", indent, "-m", indent, - "Table prefix name. Default is 't'."); - printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file."); - printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag."); - printf("%s%s%s%s\n", indent, "-o", indent, - "Direct output to the named file. 
Default is './output.txt'."); - printf("%s%s%s%s\n", indent, "-q", indent, - "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); - printf("%s%s%s%s\n", indent, "-b", indent, - "The data_type of columns, default: INT,INT,INT,INT."); - printf("%s%s%s%s\n", indent, "-w", indent, - "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); - printf("%s%s%s%s%d\n", indent, "-l", indent, - "The number of columns per record. Default is 4. Max values is ", - MAX_NUM_DATATYPE); - printf("%s%s%s%s\n", indent, "-T", indent, - "The number of threads. Default is 10."); - printf("%s%s%s%s\n", indent, "-i", indent, - "The sleep time (ms) between insertion. Default is 0."); - printf("%s%s%s%s\n", indent, "-r", indent, - "The number of records per request. Default is 30000."); - printf("%s%s%s%s\n", indent, "-t", indent, - "The number of tables. Default is 10000."); - printf("%s%s%s%s\n", indent, "-n", indent, - "The number of records per table. Default is 10000."); - printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag."); - printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt."); - printf("%s%s%s%s\n", indent, "-O", indent, - "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order."); - printf("%s%s%s%s\n", indent, "-R", indent, - "Out of order data's range, ms, default is 1000."); - printf("%s%s%s%s\n", indent, "-g", indent, - "Print debug info."); - printf("%s%s%s\n", indent, "-V, --version\t", - "Print version info."); - printf("%s%s%s%s\n", indent, "--help\t", indent, - "Print command line arguments list info."); -/* printf("%s%s%s%s\n", indent, "-D", indent, - "if elete database if exists. 0: no, 1: yes, default is 1"); + printf("%s%s%s%s\n", indent, "-d", indent, + "Destination database. Default is 'test'."); + printf("%s%s%s%s\n", indent, "-a", indent, + "Set the replica parameters of the database, Default 1, min: 1, max: 3."); + printf("%s%s%s%s\n", indent, "-m", indent, + "Table prefix name. Default is 'd'."); + printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file."); + printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag."); + printf("%s%s%s%s\n", indent, "-o", indent, + "Direct output to the named file. Default is './output.txt'."); + printf("%s%s%s%s\n", indent, "-q", indent, + "Query mode -- 0: SYNC, 1: ASYNC. Default is SYNC."); + printf("%s%s%s%s\n", indent, "-b", indent, + "The data_type of columns, default: FLOAT, INT, FLOAT."); + printf("%s%s%s%s\n", indent, "-w", indent, + "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); + printf("%s%s%s%s%d%s%d\n", indent, "-l", indent, + "The number of columns per record. Default is ", + DEFAULT_DATATYPE_NUM, + ". Max values is ", + MAX_NUM_COLUMNS); + printf("%s%s%s%s\n", indent, indent, indent, + "All of the new column(s) type is INT. If use -b to specify column type, -l will be ignored."); + printf("%s%s%s%s\n", indent, "-T", indent, + "The number of threads. Default is 10."); + printf("%s%s%s%s\n", indent, "-i", indent, + "The sleep time (ms) between insertion. Default is 0."); + printf("%s%s%s%s%d.\n", indent, "-S", indent, + "The timestamp step between insertion. Default is ", + DEFAULT_TIMESTAMP_STEP); + printf("%s%s%s%s\n", indent, "-r", indent, + "The number of records per request. Default is 30000."); + printf("%s%s%s%s\n", indent, "-t", indent, + "The number of tables. Default is 10000."); + printf("%s%s%s%s\n", indent, "-n", indent, + "The number of records per table. 
Default is 10000."); + printf("%s%s%s%s\n", indent, "-M", indent, + "The value of records generated are totally random."); + printf("%s%s%s%s\n", indent, indent, indent, + " The default is to simulate power equipment senario."); + printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag."); + printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt."); + printf("%s%s%s%s\n", indent, "-O", indent, + "Insert mode--0: In order, 1 ~ 50: disorder ratio. Default is in order."); + printf("%s%s%s%s\n", indent, "-R", indent, + "Out of order data's range, ms, default is 1000."); + printf("%s%s%s%s\n", indent, "-g", indent, + "Print debug info."); + printf("%s%s%s\n", indent, "-V, --version\t", + "Print version info."); + printf("%s%s%s%s\n", indent, "--help\t", indent, + "Print command line arguments list info."); + /* printf("%s%s%s%s\n", indent, "-D", indent, + "Delete database if exists. 0: no, 1: yes, default is 1"); */ } static bool isStringNumber(char *input) { - int len = strlen(input); - if (0 == len) { - return false; - } + int len = strlen(input); + if (0 == len) { + return false; + } - for (int i = 0; i < len; i++) { - if (!isdigit(input[i])) - return false; - } + for (int i = 0; i < len; i++) { + if (!isdigit(input[i])) + return false; + } - return true; + return true; } static void parse_args(int argc, char *argv[], SArguments *arguments) { - for (int i = 1; i < argc; i++) { - if (strcmp(argv[i], "-f") == 0) { - arguments->metaFile = argv[++i]; - } else if (strcmp(argv[i], "-c") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-c need a valid path following!\n"); - exit(EXIT_FAILURE); - } - tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); - } else if (strcmp(argv[i], "-h") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-h need a valid string following!\n"); - exit(EXIT_FAILURE); - } - arguments->host = argv[++i]; - } else if (strcmp(argv[i], "-p") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-p need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->port = atoi(argv[++i]); - } else if (strcmp(argv[i], "-I") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-I need a valid string following!\n"); - exit(EXIT_FAILURE); - } - ++i; - if (0 == strcasecmp(argv[i], "taosc")) { - arguments->iface = TAOSC_IFACE; - } else if (0 == strcasecmp(argv[i], "rest")) { - arguments->iface = REST_IFACE; + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-f") == 0) { + arguments->demo_mode = false; + arguments->metaFile = argv[++i]; + } else if (strcmp(argv[i], "-c") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-c need a valid path following!\n"); + exit(EXIT_FAILURE); + } + tstrncpy(configDir, argv[++i], TSDB_FILENAME_LEN); + } else if (strcmp(argv[i], "-h") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-h need a valid string following!\n"); + exit(EXIT_FAILURE); + } + arguments->host = argv[++i]; + } else if (strcmp(argv[i], "-p") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-p need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->port = atoi(argv[++i]); + } else if (strcmp(argv[i], "-I") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-I need a valid string following!\n"); + exit(EXIT_FAILURE); + } + ++i; + if (0 == strcasecmp(argv[i], "taosc")) { + arguments->iface = TAOSC_IFACE; + } else if (0 == 
strcasecmp(argv[i], "rest")) { + arguments->iface = REST_IFACE; #if STMT_IFACE_ENABLED == 1 - } else if (0 == strcasecmp(argv[i], "stmt")) { - arguments->iface = STMT_IFACE; + } else if (0 == strcasecmp(argv[i], "stmt")) { + arguments->iface = STMT_IFACE; #endif - } else { - errorPrint("%s", "\n\t-I need a valid string following!\n"); - exit(EXIT_FAILURE); - } - } else if (strcmp(argv[i], "-u") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-u need a valid string following!\n"); - exit(EXIT_FAILURE); - } - arguments->user = argv[++i]; - } else if (strcmp(argv[i], "-P") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-P need a valid string following!\n"); - exit(EXIT_FAILURE); - } - arguments->password = argv[++i]; - } else if (strcmp(argv[i], "-o") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-o need a valid string following!\n"); - exit(EXIT_FAILURE); - } - arguments->output_file = argv[++i]; - } else if (strcmp(argv[i], "-s") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-s need a valid string following!\n"); - exit(EXIT_FAILURE); - } - arguments->sqlFile = argv[++i]; - } else if (strcmp(argv[i], "-q") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. Default is SYNC.\n"); - exit(EXIT_FAILURE); - } - arguments->async_mode = atoi(argv[++i]); - } else if (strcmp(argv[i], "-T") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-T need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->num_of_threads = atoi(argv[++i]); - } else if (strcmp(argv[i], "-i") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-i need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->insert_interval = atoi(argv[++i]); - } else if (strcmp(argv[i], "-qt") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-qt need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->query_times = atoi(argv[++i]); - } else if (strcmp(argv[i], "-B") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-B need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->interlace_rows = atoi(argv[++i]); - } else if (strcmp(argv[i], "-r") == 0) { - if ((argc == i+1) - || (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-r need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->num_of_RPR = atoi(argv[++i]); - } else if (strcmp(argv[i], "-t") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-t need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->num_of_tables = atoi(argv[++i]); - } else if (strcmp(argv[i], "-n") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-n need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->num_of_DPT = atoi(argv[++i]); - } else if (strcmp(argv[i], "-d") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-d need a valid string following!\n"); - exit(EXIT_FAILURE); - } - arguments->database = argv[++i]; - } else if (strcmp(argv[i], "-l") == 0) { - if (argc == i+1) { - if (!isStringNumber(argv[i+1])) { - printHelp(); - errorPrint("%s", "\n\t-l need a number following!\n"); - 
exit(EXIT_FAILURE); - } - } - arguments->num_of_CPR = atoi(argv[++i]); + } else { + errorPrint("%s", "\n\t-I need a valid string following!\n"); + exit(EXIT_FAILURE); + } + } else if (strcmp(argv[i], "-u") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-u need a valid string following!\n"); + exit(EXIT_FAILURE); + } + arguments->user = argv[++i]; + } else if (strcmp(argv[i], "-P") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-P need a valid string following!\n"); + exit(EXIT_FAILURE); + } + arguments->password = argv[++i]; + } else if (strcmp(argv[i], "-o") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-o need a valid string following!\n"); + exit(EXIT_FAILURE); + } + arguments->output_file = argv[++i]; + } else if (strcmp(argv[i], "-s") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-s need a valid string following!\n"); + exit(EXIT_FAILURE); + } + arguments->sqlFile = argv[++i]; + } else if (strcmp(argv[i], "-q") == 0) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-q need a number following!\nQuery mode -- 0: SYNC, not-0: ASYNC. Default is SYNC.\n"); + exit(EXIT_FAILURE); + } + arguments->async_mode = atoi(argv[++i]); + } else if (strcmp(argv[i], "-T") == 0) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-T need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->num_of_threads = atoi(argv[++i]); + } else if (strcmp(argv[i], "-i") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-i need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->insert_interval = atoi(argv[++i]); + } else if (strcmp(argv[i], "-S") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("\n\t%s%s", argv[i], " need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->timestamp_step = atoi(argv[++i]); + } else if (strcmp(argv[i], "-qt") == 0) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-qt need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->query_times = atoi(argv[++i]); + } else if (strcmp(argv[i], "-B") == 0) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-B need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->interlace_rows = atoi(argv[++i]); + } else if (strcmp(argv[i], "-r") == 0) { + if ((argc == i+1) + || (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-r need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->num_of_RPR = atoi(argv[++i]); + } else if (strcmp(argv[i], "-t") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-t need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->num_of_tables = atoi(argv[++i]); + } else if (strcmp(argv[i], "-n") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-n need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->num_of_DPT = atoi(argv[++i]); + } else if (strcmp(argv[i], "-d") == 0) { + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-d need a valid string following!\n"); + exit(EXIT_FAILURE); + } + arguments->database = argv[++i]; + } else if (strcmp(argv[i], "-l") == 0) { + arguments->demo_mode = false; + if (argc == i+1) { + if 
(!isStringNumber(argv[i+1])) { + printHelp(); + errorPrint("%s", "\n\t-l need a number following!\n"); + exit(EXIT_FAILURE); + } + } + arguments->num_of_CPR = atoi(argv[++i]); - if (arguments->num_of_CPR > MAX_NUM_DATATYPE) { - printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_DATATYPE); - prompt(); - arguments->num_of_CPR = MAX_NUM_DATATYPE; - } + if (arguments->num_of_CPR > MAX_NUM_COLUMNS) { + printf("WARNING: max acceptible columns count is %d\n", MAX_NUM_COLUMNS); + prompt(); + arguments->num_of_CPR = MAX_NUM_COLUMNS; + } - for (int col = arguments->num_of_CPR; col < MAX_NUM_DATATYPE; col++) { - arguments->datatype[col] = NULL; - } + for (int col = DEFAULT_DATATYPE_NUM; col < arguments->num_of_CPR; col ++) { + arguments->datatype[col] = "INT"; + } + for (int col = arguments->num_of_CPR; col < MAX_NUM_COLUMNS; col++) { + arguments->datatype[col] = NULL; + } + } else if (strcmp(argv[i], "-b") == 0) { + arguments->demo_mode = false; + if (argc == i+1) { + printHelp(); + errorPrint("%s", "\n\t-b need valid string following!\n"); + exit(EXIT_FAILURE); + } + ++i; + if (strstr(argv[i], ",") == NULL) { + // only one col + if (strcasecmp(argv[i], "INT") + && strcasecmp(argv[i], "FLOAT") + && strcasecmp(argv[i], "TINYINT") + && strcasecmp(argv[i], "BOOL") + && strcasecmp(argv[i], "SMALLINT") + && strcasecmp(argv[i], "BIGINT") + && strcasecmp(argv[i], "DOUBLE") + && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "TIMESTAMP") + && strcasecmp(argv[i], "NCHAR")) { + printHelp(); + errorPrint("%s", "-b: Invalid data_type!\n"); + exit(EXIT_FAILURE); + } + arguments->datatype[0] = argv[i]; + } else { + // more than one col + int index = 0; + g_dupstr = strdup(argv[i]); + char *running = g_dupstr; + char *token = strsep(&running, ","); + while(token != NULL) { + if (strcasecmp(token, "INT") + && strcasecmp(token, "FLOAT") + && strcasecmp(token, "TINYINT") + && strcasecmp(token, "BOOL") + && strcasecmp(token, "SMALLINT") + && strcasecmp(token, "BIGINT") + && strcasecmp(token, "DOUBLE") + && strcasecmp(token, "BINARY") + && strcasecmp(token, "TIMESTAMP") + && strcasecmp(token, "NCHAR")) { + printHelp(); + free(g_dupstr); + errorPrint("%s", "-b: Invalid data_type!\n"); + exit(EXIT_FAILURE); + } + arguments->datatype[index++] = token; + token = strsep(&running, ","); + if (index >= MAX_NUM_COLUMNS) break; + } + arguments->datatype[index] = NULL; + } + } else if (strcmp(argv[i], "-w") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-w need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->len_of_binary = atoi(argv[++i]); + } else if (strcmp(argv[i], "-m") == 0) { + if ((argc == i+1) || + (isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-m need a letter-initial string following!\n"); + exit(EXIT_FAILURE); + } + arguments->tb_prefix = argv[++i]; + } else if (strcmp(argv[i], "-N") == 0) { + arguments->use_metric = false; + } else if (strcmp(argv[i], "-M") == 0) { + arguments->demo_mode = false; + } else if (strcmp(argv[i], "-x") == 0) { + arguments->insert_only = false; + } else if (strcmp(argv[i], "-y") == 0) { + arguments->answer_yes = true; + } else if (strcmp(argv[i], "-g") == 0) { + arguments->debug_print = true; + } else if (strcmp(argv[i], "-gg") == 0) { + arguments->verbose_print = true; + } else if (strcmp(argv[i], "-pp") == 0) { + arguments->performance_print = true; + } else if (strcmp(argv[i], "-O") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + 
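/*
 * Illustrative sketch (not the actual taosdemo code): the -b handling above
 * strdup()s the option value, walks it with strsep(), and validates every
 * comma-separated token against the supported column types before storing it.
 * A minimal standalone version of that tokenize-and-validate pattern, using
 * hypothetical names (parse_type_list, is_supported_type, MAX_COLS), might
 * look like the following.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

#define MAX_COLS 64   /* hypothetical cap, stands in for MAX_NUM_COLUMNS */

static int is_supported_type(const char *t) {
    static const char *types[] = { "INT", "FLOAT", "TINYINT", "BOOL",
        "SMALLINT", "BIGINT", "DOUBLE", "BINARY", "TIMESTAMP", "NCHAR" };
    for (size_t i = 0; i < sizeof(types)/sizeof(types[0]); i++)
        if (0 == strcasecmp(t, types[i])) return 1;
    return 0;
}

/* Returns the number of parsed types, or -1 if any token is invalid. */
static int parse_type_list(const char *arg, char *out[], int max) {
    char *dup = strdup(arg);          /* keep a copy; strsep() modifies it */
    char *run = dup, *tok;
    int n = 0;
    while ((tok = strsep(&run, ",")) != NULL && n < max) {
        if (!is_supported_type(tok)) {
            while (n > 0) free(out[--n]);   /* roll back on an invalid token */
            free(dup);
            return -1;
        }
        out[n++] = strdup(tok);       /* own the tokens so dup can be freed */
    }
    free(dup);
    return n;
}

int main(void) {
    char *cols[MAX_COLS];
    int n = parse_type_list("INT,FLOAT,BINARY", cols, MAX_COLS);
    printf("parsed %d column types\n", n);
    for (int i = 0; i < n; i++) free(cols[i]);
    return 0;
}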
errorPrint("%s", "\n\t-O need a number following!\n"); + exit(EXIT_FAILURE); + } - } else if (strcmp(argv[i], "-b") == 0) { - if (argc == i+1) { - printHelp(); - errorPrint("%s", "\n\t-b need valid string following!\n"); - exit(EXIT_FAILURE); - } - ++i; - if (strstr(argv[i], ",") == NULL) { - // only one col - if (strcasecmp(argv[i], "INT") - && strcasecmp(argv[i], "FLOAT") - && strcasecmp(argv[i], "TINYINT") - && strcasecmp(argv[i], "BOOL") - && strcasecmp(argv[i], "SMALLINT") - && strcasecmp(argv[i], "BIGINT") - && strcasecmp(argv[i], "DOUBLE") - && strcasecmp(argv[i], "BINARY") - && strcasecmp(argv[i], "TIMESTAMP") - && strcasecmp(argv[i], "NCHAR")) { - printHelp(); - errorPrint("%s", "-b: Invalid data_type!\n"); - exit(EXIT_FAILURE); - } - arguments->datatype[0] = argv[i]; - } else { - // more than one col - int index = 0; - g_dupstr = strdup(argv[i]); - char *running = g_dupstr; - char *token = strsep(&running, ","); - while(token != NULL) { - if (strcasecmp(token, "INT") - && strcasecmp(token, "FLOAT") - && strcasecmp(token, "TINYINT") - && strcasecmp(token, "BOOL") - && strcasecmp(token, "SMALLINT") - && strcasecmp(token, "BIGINT") - && strcasecmp(token, "DOUBLE") - && strcasecmp(token, "BINARY") - && strcasecmp(token, "TIMESTAMP") - && strcasecmp(token, "NCHAR")) { - printHelp(); - free(g_dupstr); - errorPrint("%s", "-b: Invalid data_type!\n"); - exit(EXIT_FAILURE); - } - arguments->datatype[index++] = token; - token = strsep(&running, ","); - if (index >= MAX_NUM_DATATYPE) break; - } - arguments->datatype[index] = NULL; - } - } else if (strcmp(argv[i], "-w") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-w need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->len_of_binary = atoi(argv[++i]); - } else if (strcmp(argv[i], "-m") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-m need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->tb_prefix = argv[++i]; - } else if (strcmp(argv[i], "-N") == 0) { - arguments->use_metric = false; - } else if (strcmp(argv[i], "-x") == 0) { - arguments->insert_only = false; - } else if (strcmp(argv[i], "-y") == 0) { - arguments->answer_yes = true; - } else if (strcmp(argv[i], "-g") == 0) { - arguments->debug_print = true; - } else if (strcmp(argv[i], "-gg") == 0) { - arguments->verbose_print = true; - } else if (strcmp(argv[i], "-pp") == 0) { - arguments->performance_print = true; - } else if (strcmp(argv[i], "-O") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-O need a number following!\n"); - exit(EXIT_FAILURE); - } + arguments->disorderRatio = atoi(argv[++i]); + + if (arguments->disorderRatio > 50) { + arguments->disorderRatio = 50; + } - arguments->disorderRatio = atoi(argv[++i]); + if (arguments->disorderRatio < 0) { + arguments->disorderRatio = 0; + } - if (arguments->disorderRatio > 50) { - arguments->disorderRatio = 50; - } + } else if (strcmp(argv[i], "-R") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-R need a number following!\n"); + exit(EXIT_FAILURE); + } - if (arguments->disorderRatio < 0) { - arguments->disorderRatio = 0; - } + arguments->disorderRange = atoi(argv[++i]); + if (arguments->disorderRange < 0) + arguments->disorderRange = 1000; - } else if (strcmp(argv[i], "-R") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-R 
need a number following!\n"); - exit(EXIT_FAILURE); - } + } else if (strcmp(argv[i], "-a") == 0) { + if ((argc == i+1) || + (!isStringNumber(argv[i+1]))) { + printHelp(); + errorPrint("%s", "\n\t-a need a number following!\n"); + exit(EXIT_FAILURE); + } + arguments->replica = atoi(argv[++i]); + if (arguments->replica > 3 || arguments->replica < 1) { + arguments->replica = 1; + } + } else if (strcmp(argv[i], "-D") == 0) { + arguments->method_of_delete = atoi(argv[++i]); + if (arguments->method_of_delete > 3) { + errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n"); + exit(EXIT_FAILURE); + } + } else if ((strcmp(argv[i], "--version") == 0) || + (strcmp(argv[i], "-V") == 0)){ + printVersion(); + exit(0); + } else if (strcmp(argv[i], "--help") == 0) { + printHelp(); + exit(0); + } else { + printHelp(); + errorPrint("%s", "ERROR: wrong options\n"); + exit(EXIT_FAILURE); + } + } - arguments->disorderRange = atoi(argv[++i]); - if (arguments->disorderRange < 0) - arguments->disorderRange = 1000; + int columnCount; + for (columnCount = 0; columnCount < MAX_NUM_COLUMNS; columnCount ++) { + if (g_args.datatype[columnCount] == NULL) { + break; + } + } - } else if (strcmp(argv[i], "-a") == 0) { - if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { - printHelp(); - errorPrint("%s", "\n\t-a need a number following!\n"); - exit(EXIT_FAILURE); - } - arguments->replica = atoi(argv[++i]); - if (arguments->replica > 3 || arguments->replica < 1) { - arguments->replica = 1; - } - } else if (strcmp(argv[i], "-D") == 0) { - arguments->method_of_delete = atoi(argv[++i]); - if (arguments->method_of_delete > 3) { - errorPrint("%s", "\n\t-D need a valud (0~3) number following!\n"); - exit(EXIT_FAILURE); - } - } else if ((strcmp(argv[i], "--version") == 0) || - (strcmp(argv[i], "-V") == 0)){ - printVersion(); - exit(0); - } else if (strcmp(argv[i], "--help") == 0) { - printHelp(); - exit(0); - } else { - printHelp(); - errorPrint("%s", "ERROR: wrong options\n"); - exit(EXIT_FAILURE); - } - } - - int columnCount; - for (columnCount = 0; columnCount < MAX_NUM_DATATYPE; columnCount ++) { - if (g_args.datatype[columnCount] == NULL) { - break; - } - } - - if (0 == columnCount) { - perror("data type error!"); - exit(-1); - } - g_args.num_of_CPR = columnCount; - - if (((arguments->debug_print) && (arguments->metaFile == NULL)) - || arguments->verbose_print) { - printf("###################################################################\n"); - printf("# meta file: %s\n", arguments->metaFile); - printf("# Server IP: %s:%hu\n", - arguments->host == NULL ? "localhost" : arguments->host, - arguments->port ); - printf("# User: %s\n", arguments->user); - printf("# Password: %s\n", arguments->password); - printf("# Use metric: %s\n", - arguments->use_metric ? 
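/*
 * Illustrative sketch (not the actual taosdemo code): each numeric flag above
 * repeats the same steps -- check that a value follows, check that it is all
 * digits via isStringNumber(), then atoi() and clamp it into a legal range
 * (the -O ratio, for instance, is capped at 50). A hypothetical helper named
 * numeric_arg() that folds those steps together could look like this.
 */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns the clamped value, or exits if the argument is missing or non-numeric. */
static int numeric_arg(int argc, char *argv[], int i, int lo, int hi) {
    if (i + 1 >= argc || argv[i + 1][0] == '\0') {
        fprintf(stderr, "ERROR: %s need a number following!\n", argv[i]);
        exit(EXIT_FAILURE);
    }
    for (const char *p = argv[i + 1]; *p; p++) {
        if (!isdigit((unsigned char)*p)) {
            fprintf(stderr, "ERROR: %s need a number following!\n", argv[i]);
            exit(EXIT_FAILURE);
        }
    }
    int v = atoi(argv[i + 1]);
    if (v < lo) v = lo;
    if (v > hi) v = hi;
    return v;
}

/* example use: arguments->disorderRatio = numeric_arg(argc, argv, i, 0, 50); i++; */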
"true" : "false"); - if (*(arguments->datatype)) { - printf("# Specified data type: "); - for (int i = 0; i < MAX_NUM_DATATYPE; i++) - if (arguments->datatype[i]) - printf("%s,", arguments->datatype[i]); - else - break; - printf("\n"); + if (0 == columnCount) { + perror("data type error!"); + exit(-1); } - printf("# Insertion interval: %"PRIu64"\n", - arguments->insert_interval); - printf("# Number of records per req: %u\n", - arguments->num_of_RPR); - printf("# Max SQL length: %"PRIu64"\n", - arguments->max_sql_len); - printf("# Length of Binary: %d\n", arguments->len_of_binary); - printf("# Number of Threads: %d\n", arguments->num_of_threads); - printf("# Number of Tables: %"PRId64"\n", - arguments->num_of_tables); - printf("# Number of Data per Table: %"PRId64"\n", - arguments->num_of_DPT); - printf("# Database name: %s\n", arguments->database); - printf("# Table prefix: %s\n", arguments->tb_prefix); - if (arguments->disorderRatio) { - printf("# Data order: %d\n", arguments->disorderRatio); - printf("# Data out of order rate: %d\n", arguments->disorderRange); - - } - printf("# Delete method: %d\n", arguments->method_of_delete); - printf("# Answer yes when prompt: %d\n", arguments->answer_yes); - printf("# Print debug info: %d\n", arguments->debug_print); - printf("# Print verbose info: %d\n", arguments->verbose_print); - printf("###################################################################\n"); + g_args.num_of_CPR = columnCount; + + if (((arguments->debug_print) && (arguments->metaFile == NULL)) + || arguments->verbose_print) { + printf("###################################################################\n"); + printf("# meta file: %s\n", arguments->metaFile); + printf("# Server IP: %s:%hu\n", + arguments->host == NULL ? "localhost" : arguments->host, + arguments->port ); + printf("# User: %s\n", arguments->user); + printf("# Password: %s\n", arguments->password); + printf("# Use metric: %s\n", + arguments->use_metric ? 
"true" : "false"); + if (*(arguments->datatype)) { + printf("# Specified data type: "); + for (int i = 0; i < MAX_NUM_COLUMNS; i++) + if (arguments->datatype[i]) + printf("%s,", arguments->datatype[i]); + else + break; + printf("\n"); + } + printf("# Insertion interval: %"PRIu64"\n", + arguments->insert_interval); + printf("# Number of records per req: %u\n", + arguments->num_of_RPR); + printf("# Max SQL length: %"PRIu64"\n", + arguments->max_sql_len); + printf("# Length of Binary: %d\n", arguments->len_of_binary); + printf("# Number of Threads: %d\n", arguments->num_of_threads); + printf("# Number of Tables: %"PRId64"\n", + arguments->num_of_tables); + printf("# Number of Data per Table: %"PRId64"\n", + arguments->num_of_DPT); + printf("# Database name: %s\n", arguments->database); + printf("# Table prefix: %s\n", arguments->tb_prefix); + if (arguments->disorderRatio) { + printf("# Data order: %d\n", arguments->disorderRatio); + printf("# Data out of order rate: %d\n", arguments->disorderRange); + } + printf("# Delete method: %d\n", arguments->method_of_delete); + printf("# Answer yes when prompt: %d\n", arguments->answer_yes); + printf("# Print debug info: %d\n", arguments->debug_print); + printf("# Print verbose info: %d\n", arguments->verbose_print); + printf("###################################################################\n"); - prompt(); - } + prompt(); + } } -static bool getInfoFromJsonFile(char* file); -static void init_rand_data(); static void tmfclose(FILE *fp) { - if (NULL != fp) { - fclose(fp); - } + if (NULL != fp) { + fclose(fp); + } } static void tmfree(char *buf) { - if (NULL != buf) { - free(buf); - } + if (NULL != buf) { + free(buf); + } } static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { - int i; - TAOS_RES *res = NULL; - int32_t code = -1; + int i; + TAOS_RES *res = NULL; + int32_t code = -1; + + for (i = 0; i < 5 /* retry */; i++) { + if (NULL != res) { + taos_free_result(res); + res = NULL; + } - for (i = 0; i < 5 /* retry */; i++) { - if (NULL != res) { - taos_free_result(res); - res = NULL; + res = taos_query(taos, command); + code = taos_errno(res); + if (0 == code) { + break; + } } - res = taos_query(taos, command); - code = taos_errno(res); - if (0 == code) { - break; + verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); + if (code != 0) { + if (!quiet) { + errorPrint("Failed to execute %s, reason: %s\n", + command, taos_errstr(res)); + } + taos_free_result(res); + //taos_close(taos); + return -1; } - } - verbosePrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - if (code != 0) { - if (!quiet) { - errorPrint("Failed to execute %s, reason: %s\n", - command, taos_errstr(res)); + if (INSERT_TYPE == type) { + int affectedRows = taos_affected_rows(res); + taos_free_result(res); + return affectedRows; } - taos_free_result(res); - //taos_close(taos); - return -1; - } - if (INSERT_TYPE == type) { - int affectedRows = taos_affected_rows(res); taos_free_result(res); - return affectedRows; - } - - taos_free_result(res); - return 0; + return 0; } static void appendResultBufToFile(char *resultBuf, threadInfo *pThreadInfo) { - pThreadInfo->fp = fopen(pThreadInfo->filePath, "at"); - if (pThreadInfo->fp == NULL) { - errorPrint( - "%s() LN%d, failed to open result file: %s, result will not save to file\n", - __func__, __LINE__, pThreadInfo->filePath); - return; - } - - fprintf(pThreadInfo->fp, "%s", resultBuf); - tmfclose(pThreadInfo->fp); - pThreadInfo->fp = NULL; + pThreadInfo->fp = 
fopen(pThreadInfo->filePath, "at"); + if (pThreadInfo->fp == NULL) { + errorPrint( + "%s() LN%d, failed to open result file: %s, result will not save to file\n", + __func__, __LINE__, pThreadInfo->filePath); + return; + } + + fprintf(pThreadInfo->fp, "%s", resultBuf); + tmfclose(pThreadInfo->fp); + pThreadInfo->fp = NULL; } static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { - TAOS_ROW row = NULL; - int num_rows = 0; - int num_fields = taos_field_count(res); - TAOS_FIELD *fields = taos_fetch_fields(res); - - char* databuf = (char*) calloc(1, 100*1024*1024); - if (databuf == NULL) { - errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", - __func__, __LINE__); - return ; - } - - int totalLen = 0; - - // fetch the records row by row - while((row = taos_fetch_row(res))) { - if ((strlen(pThreadInfo->filePath) > 0) - && (totalLen >= 100*1024*1024 - 32000)) { + TAOS_ROW row = NULL; + int num_rows = 0; + int num_fields = taos_field_count(res); + TAOS_FIELD *fields = taos_fetch_fields(res); + + char* databuf = (char*) calloc(1, 100*1024*1024); + if (databuf == NULL) { + errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", + __func__, __LINE__); + return ; + } + + int64_t totalLen = 0; + + // fetch the records row by row + while((row = taos_fetch_row(res))) { + if (totalLen >= 100*1024*1024 - 32000) { + if (strlen(pThreadInfo->filePath) > 0) + appendResultBufToFile(databuf, pThreadInfo); + totalLen = 0; + memset(databuf, 0, 100*1024*1024); + } + num_rows++; + char temp[16000] = {0}; + int len = taos_print_row(temp, row, fields, num_fields); + len += sprintf(temp + len, "\n"); + //printf("query result:%s\n", temp); + memcpy(databuf + totalLen, temp, len); + totalLen += len; + verbosePrint("%s() LN%d, totalLen: %"PRId64"\n", + __func__, __LINE__, totalLen); + } + + verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", + __func__, __LINE__, databuf, pThreadInfo->filePath); + if (strlen(pThreadInfo->filePath) > 0) { appendResultBufToFile(databuf, pThreadInfo); - totalLen = 0; - memset(databuf, 0, 100*1024*1024); - } - num_rows++; - char temp[16000] = {0}; - int len = taos_print_row(temp, row, fields, num_fields); - len += sprintf(temp + len, "\n"); - //printf("query result:%s\n", temp); - memcpy(databuf + totalLen, temp, len); - totalLen += len; - } - - verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", - __func__, __LINE__, databuf, pThreadInfo->filePath); - if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(databuf, pThreadInfo); - } - free(databuf); + } + free(databuf); } static void selectAndGetResult( threadInfo *pThreadInfo, char *command) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", - __func__, __LINE__, command, taos_errstr(res)); + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", strlen("taosc"))) { + TAOS_RES *res = taos_query(pThreadInfo->taos, command); + if (res == NULL || taos_errno(res) != 0) { + errorPrint("%s() LN%d, failed to execute sql:%s, reason:%s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + return; + } + + fetchResult(res, pThreadInfo); taos_free_result(res); - return; - } - fetchResult(res, pThreadInfo); - taos_free_result(res); + } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { + int retCode = postProceSql( + g_queryInfo.host, 
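/*
 * Illustrative sketch (not the actual taosdemo code): fetchResult() above
 * accumulates printed rows in a 100 MB heap buffer and flushes it to the
 * result file whenever fewer than 32000 bytes of headroom remain (leaving
 * room for the 16000-byte per-row scratch buffer). The same
 * flush-when-nearly-full pattern, stripped of the TAOS calls and using
 * hypothetical names (row_sink_*), looks like this.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SINK_CAP   (100*1024*1024)
#define SINK_SLACK (32000)

typedef struct { char *buf; size_t len; const char *path; } row_sink;

static void row_sink_flush(row_sink *s) {
    if (s->len == 0 || s->path == NULL) { s->len = 0; return; }
    FILE *fp = fopen(s->path, "at");     /* append, as appendResultBufToFile() does */
    if (fp) { fwrite(s->buf, 1, s->len, fp); fclose(fp); }
    s->len = 0;
}

static void row_sink_add(row_sink *s, const char *row, size_t n) {
    if (s->len + n > SINK_CAP - SINK_SLACK)  /* not enough headroom: flush first */
        row_sink_flush(s);
    memcpy(s->buf + s->len, row, n);
    s->len += n;
}

/*
 * usage: row_sink s = { calloc(1, SINK_CAP), 0, "./output.txt" };
 *        row_sink_add(&s, line, strlen(line)); ... row_sink_flush(&s); free(s.buf);
 */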
&(g_queryInfo.serv_addr), g_queryInfo.port, + command, + pThreadInfo); + if (0 != retCode) { + printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); + } - } else if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - int retCode = postProceSql( - g_queryInfo.host, &(g_queryInfo.serv_addr), g_queryInfo.port, - command, - pThreadInfo); - if (0 != retCode) { - printf("====restful return fail, threadID[%d]\n", pThreadInfo->threadID); - } - - } else { - errorPrint("%s() LN%d, unknown query mode: %s\n", - __func__, __LINE__, g_queryInfo.queryMode); - } + } else { + errorPrint("%s() LN%d, unknown query mode: %s\n", + __func__, __LINE__, g_queryInfo.queryMode); + } } static int32_t rand_bool(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 2; + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 2; } static int32_t rand_tinyint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 128; + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 128; } static int32_t rand_smallint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor] % 32767; + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor] % 32767; } static int32_t rand_int(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randint[cursor]; + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randint[cursor]; } static int64_t rand_bigint(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randbigint[cursor]; + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randbigint[cursor]; } static float rand_float(){ - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randfloat[cursor]; + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randfloat[cursor]; +} + +static float demo_current_float(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return (float)(9.8 + 0.04 * (randint[cursor] % 10) + randfloat[cursor]/1000000000); +} + +static int32_t demo_voltage_int(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return 215 + randint[cursor] % 10; +} + +static float demo_phase_float(){ + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return (float)((115 + randint[cursor] % 10 + randfloat[cursor]/1000000000)/360); } #if 0 @@ -1317,47 +1377,46 @@ static const char charNum[] = "0123456789"; static void nonrand_string(char *, int) __attribute__ ((unused)); // reserve for debugging purpose static void nonrand_string(char *str, int size) { - str[0] = 0; - if (size > 0) { - int n; - for (n = 0; n < size; n++) { - str[n] = charNum[n % 10]; - } - str[n] = 0; - } + str[0] = 0; + if (size > 0) { + int n; + for (n = 0; n < size; n++) { + str[n] = charNum[n % 10]; + } + str[n] = 0; + } } #endif static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; static void rand_string(char *str, int size) { - str[0] = 0; - if (size > 0) { - //--size; - int n; - for (n = 0; n < size; n++) { - int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1); - str[n] = charset[key]; - } - str[n] = 0; - } + str[0] = 0; + if (size > 0) { + //--size; + int n; + for (n = 0; n < size; n++) { 
+ int key = abs(rand_tinyint()) % (int)(sizeof(charset) - 1); + str[n] = charset[key]; + } + str[n] = 0; + } } static double rand_double() { - static int cursor; - cursor++; - cursor = cursor % MAX_PREPARED_RAND; - return randdouble[cursor]; - + static int cursor; + cursor++; + cursor = cursor % MAX_PREPARED_RAND; + return randdouble[cursor]; } static void init_rand_data() { - for (int i = 0; i < MAX_PREPARED_RAND; i++){ - randint[i] = (int)(taosRandom() % 65535); - randbigint[i] = (int64_t)(taosRandom() % 2147483648); - randfloat[i] = (float)(taosRandom() / 1000.0); - randdouble[i] = (double)(taosRandom() / 1000000.0); - } + for (int i = 0; i < MAX_PREPARED_RAND; i++){ + randint[i] = (int)(taosRandom() % 65535); + randbigint[i] = (int64_t)(taosRandom() % 2147483648); + randfloat[i] = (float)(taosRandom() / 1000.0); + randdouble[i] = (double)(taosRandom() / 1000000.0); + } } #define SHOW_PARSE_RESULT_START() \ @@ -1383,759 +1442,781 @@ static void init_rand_data() { static int printfInsertMeta() { SHOW_PARSE_RESULT_START(); - printf("interface: \033[33m%s\033[0m\n", - (g_args.iface==TAOSC_IFACE)?"taosc":(g_args.iface==REST_IFACE)?"rest":"stmt"); - printf("host: \033[33m%s:%u\033[0m\n", - g_Dbs.host, g_Dbs.port); - printf("user: \033[33m%s\033[0m\n", g_Dbs.user); - printf("password: \033[33m%s\033[0m\n", g_Dbs.password); - printf("configDir: \033[33m%s\033[0m\n", configDir); - printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); - printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); - printf("thread num of create table: \033[33m%d\033[0m\n", - g_Dbs.threadCountByCreateTbl); - printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", - g_args.insert_interval); - printf("number of records per req: \033[33m%u\033[0m\n", - g_args.num_of_RPR); - printf("max sql length: \033[33m%"PRIu64"\033[0m\n", - g_args.max_sql_len); - - printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - printf("database[\033[33m%d\033[0m]:\n", i); - printf(" database[%d] name: \033[33m%s\033[0m\n", - i, g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33mno\033[0m\n"); - } else { - printf(" drop: \033[33myes\033[0m\n"); - } + if (g_args.demo_mode) + printf("\ntaosdemo is simulating data generated by power equipments monitoring...\n\n"); + else + printf("\ntaosdemo is simulating random data as you request..\n\n"); + + if (g_args.iface != INTERFACE_BUT) { + // first time if no iface specified + printf("interface: \033[33m%s\033[0m\n", + (g_args.iface==TAOSC_IFACE)?"taosc": + (g_args.iface==REST_IFACE)?"rest":"stmt"); + } + + printf("host: \033[33m%s:%u\033[0m\n", + g_Dbs.host, g_Dbs.port); + printf("user: \033[33m%s\033[0m\n", g_Dbs.user); + printf("password: \033[33m%s\033[0m\n", g_Dbs.password); + printf("configDir: \033[33m%s\033[0m\n", configDir); + printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); + printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); + printf("thread num of create table: \033[33m%d\033[0m\n", + g_Dbs.threadCountByCreateTbl); + printf("top insert interval: \033[33m%"PRIu64"\033[0m\n", + g_args.insert_interval); + printf("number of records per req: \033[33m%u\033[0m\n", + g_args.num_of_RPR); + printf("max sql length: \033[33m%"PRIu64"\033[0m\n", + g_args.max_sql_len); + + printf("database count: \033[33m%d\033[0m\n", g_Dbs.dbCount); - if (g_Dbs.db[i].dbCfg.blocks > 0) { - printf(" blocks: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.blocks); - } - if 
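/*
 * Illustrative sketch (not the actual taosdemo code): the rand_*() and
 * demo_*() helpers above avoid calling the RNG on the hot insert path --
 * init_rand_data() fills fixed tables once, and each generator just advances
 * its own static cursor over a table, optionally reshaping the value (e.g.
 * demo_voltage_int() maps it into 215..224). A condensed standalone version
 * of that technique, with rand() standing in for taosRandom():
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PREPARED 4096   /* stands in for MAX_PREPARED_RAND */

static int32_t prepared_int[PREPARED];

static void prepare_rand(void) {                 /* fill the table once up front */
    for (int i = 0; i < PREPARED; i++)
        prepared_int[i] = (int32_t)(rand() % 65535);
}

static int32_t next_int(void) {                  /* same shape as rand_int() */
    static int cursor;
    cursor = (cursor + 1) % PREPARED;
    return prepared_int[cursor];
}

static int32_t next_voltage(void) {              /* same shape as demo_voltage_int() */
    return 215 + next_int() % 10;
}

int main(void) {
    prepare_rand();
    for (int i = 0; i < 3; i++)
        printf("voltage=%d\n", next_voltage());
    return 0;
}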
(g_Dbs.db[i].dbCfg.cache > 0) { - printf(" cache: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - printf(" days: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - printf(" keep: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - printf(" replica: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - printf(" update: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - printf(" minRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - printf(" maxRows: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - printf(" walLevel: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - printf(" fsync: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - printf(" quorum: \033[33m%d\033[0m\n", - g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - printf(" precision: \033[33m%s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - } else { - printf("\033[1m\033[40;31m precision error: %s\033[0m\n", - g_Dbs.db[i].dbCfg.precision); - return -1; - } - } - - printf(" super table count: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTblCount); - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); - - printf(" stbName: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sTblName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); - } else if (AUTO_CREATE_SUBTBL == - g_Dbs.db[i].superTbls[j].autoCreateTable) { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); - } else { - printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", "no"); - } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); - } else { - printf(" childTblExists: \033[33m%s\033[0m\n", "error"); - } - - printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblCount); - printf(" childTblPrefix: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - printf(" dataSource: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].dataSource); - printf(" iface: \033[33m%s\033[0m\n", - (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": - (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); - if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblLimit); - } - if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { - printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTbls[j].childTblOffset); - } - printf(" insertRows: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].insertRows); -/* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); - }else { - printf(" 
multiThreadWriteOneTbl: \033[33myes\033[0m\n"); - } - */ - printf(" interlaceRows: \033[33m%u\033[0m\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } - - printf(" disorderRange: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRange); - printf(" disorderRatio: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - printf(" startTimestamp: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - printf(" sampleFormat: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - printf(" sampleFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].sampleFile); - printf(" tagsFile: \033[33m%s\033[0m\n", - g_Dbs.db[i].superTbls[j].tagsFile); - printf(" columnCount: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", 6)) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", 5))) { - printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); + for (int i = 0; i < g_Dbs.dbCount; i++) { + printf("database[\033[33m%d\033[0m]:\n", i); + printf(" database[%d] name: \033[33m%s\033[0m\n", + i, g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + printf(" drop: \033[33mno\033[0m\n"); } else { - printf("column[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType); + printf(" drop: \033[33myes\033[0m\n"); } - } - printf("\n"); - printf(" tagCount: \033[33m%d\033[0m\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - printf("tag[%d]:\033[33m%s\033[0m ", k, - g_Dbs.db[i].superTbls[j].tags[k].dataType); + if (g_Dbs.db[i].dbCfg.blocks > 0) { + printf(" blocks: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + printf(" cache: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + printf(" days: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + printf(" keep: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + printf(" replica: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + printf(" update: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + printf(" minRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.minRows); + } + if 
(g_Dbs.db[i].dbCfg.maxRows > 0) { + printf(" maxRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + printf(" comp: \033[33m%d\033[0m\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + printf(" walLevel: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + printf(" fsync: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + printf(" quorum: \033[33m%d\033[0m\n", + g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) +#if NANO_SECOND_ENABLED == 1 + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2)) +#endif + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2))) { + printf(" precision: \033[33m%s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); + } else { + printf("\033[1m\033[40;31m precision error: %s\033[0m\n", + g_Dbs.db[i].dbCfg.precision); + return -1; + } } - } - printf("\n"); - } - printf("\n"); - } - SHOW_PARSE_RESULT_END(); + printf(" super table count: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTblCount); + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + printf(" super table[\033[33m%"PRIu64"\033[0m]:\n", j); - return 0; -} + printf(" stbName: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sTblName); -static void printfInsertMetaToFile(FILE* fp) { + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); + } else if (AUTO_CREATE_SUBTBL == + g_Dbs.db[i].superTbls[j].autoCreateTable) { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); + } - SHOW_PARSE_RESULT_START_TO_FILE(fp); - - fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); - fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "configDir: %s\n", configDir); - fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); - fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); - fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR); - fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); - fprintf(fp, "database count: %d\n", g_Dbs.dbCount); - - for (int i = 0; i < g_Dbs.dbCount; i++) { - fprintf(fp, "database[%d]:\n", i); - fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); - if (0 == g_Dbs.db[i].drop) { - fprintf(fp, " drop: no\n"); - }else { - fprintf(fp, " drop: yes\n"); - } - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); - } - if (g_Dbs.db[i].dbCfg.minRows > 0) { - fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - 
fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); - } - if (g_Dbs.db[i].dbCfg.quorum > 0) { - fprintf(fp, " quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.precision[0] != 0) { - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { - fprintf(fp, " precision: %s\n", - g_Dbs.db[i].dbCfg.precision); - } else { - fprintf(fp, " precision error: %s\n", - g_Dbs.db[i].dbCfg.precision); - } - } - - fprintf(fp, " super table count: %"PRIu64"\n", - g_Dbs.db[i].superTblCount); - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - fprintf(fp, " super table[%d]:\n", j); - - fprintf(fp, " stbName: %s\n", - g_Dbs.db[i].superTbls[j].sTblName); - - if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "no"); - } else if (AUTO_CREATE_SUBTBL - == g_Dbs.db[i].superTbls[j].autoCreateTable) { - fprintf(fp, " autoCreateTable: %s\n", "yes"); - } else { - fprintf(fp, " autoCreateTable: %s\n", "error"); - } - - if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "no"); - } else if (TBL_ALREADY_EXISTS - == g_Dbs.db[i].superTbls[j].childTblExists) { - fprintf(fp, " childTblExists: %s\n", "yes"); - } else { - fprintf(fp, " childTblExists: %s\n", "error"); - } - - fprintf(fp, " childTblCount: %"PRId64"\n", - g_Dbs.db[i].superTbls[j].childTblCount); - fprintf(fp, " childTblPrefix: %s\n", - g_Dbs.db[i].superTbls[j].childTblPrefix); - fprintf(fp, " dataSource: %s\n", - g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " iface: %s\n", - (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": - (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); - fprintf(fp, " insertRows: %"PRId64"\n", - g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " interlace rows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { - fprintf(fp, " stable insert interval: %"PRIu64"\n", - g_Dbs.db[i].superTbls[j].insertInterval); - } -/* - if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - fprintf(fp, " multiThreadWriteOneTbl: no\n"); - }else { - fprintf(fp, " multiThreadWriteOneTbl: yes\n"); - } - */ - fprintf(fp, " interlaceRows: %u\n", - g_Dbs.db[i].superTbls[j].interlaceRows); - fprintf(fp, " disorderRange: %d\n", - g_Dbs.db[i].superTbls[j].disorderRange); - fprintf(fp, " disorderRatio: %d\n", - g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %"PRIu64"\n", - g_Dbs.db[i].superTbls[j].maxSqlLen); - - fprintf(fp, " timeStampStep: %"PRId64"\n", - g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", - g_Dbs.db[i].superTbls[j].startTimestamp); - fprintf(fp, " sampleFormat: %s\n", - g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", - g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", - g_Dbs.db[i].superTbls[j].tagsFile); - - fprintf(fp, " columnCount: %d\n ", - g_Dbs.db[i].superTbls[j].columnCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); - if ((0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp( - g_Dbs.db[i].superTbls[j].columns[k].dataType, - "nchar", 
strlen("nchar")))) { - fprintf(fp, "column[%d]:%s(%d) ", k, - g_Dbs.db[i].superTbls[j].columns[k].dataType, - g_Dbs.db[i].superTbls[j].columns[k].dataLen); - } else { - fprintf(fp, "column[%d]:%s ", - k, g_Dbs.db[i].superTbls[j].columns[k].dataType); - } - } - fprintf(fp, "\n"); + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "no"); + } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + printf(" childTblExists: \033[33m%s\033[0m\n", "yes"); + } else { + printf(" childTblExists: \033[33m%s\033[0m\n", "error"); + } - fprintf(fp, " tagCount: %d\n ", - g_Dbs.db[i].superTbls[j].tagCount); - for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { - //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "binary", strlen("binary"))) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, - "nchar", strlen("nchar")))) { - fprintf(fp, "tag[%d]:%s(%d) ", - k, g_Dbs.db[i].superTbls[j].tags[k].dataType, - g_Dbs.db[i].superTbls[j].tags[k].dataLen); - } else { - fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); + printf(" childTblCount: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblCount); + printf(" childTblPrefix: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblPrefix); + printf(" dataSource: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].dataSource); + printf(" iface: \033[33m%s\033[0m\n", + (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": + (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); + if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { + printf(" childTblLimit: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblLimit); + } + if (g_Dbs.db[i].superTbls[j].childTblOffset > 0) { + printf(" childTblOffset: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblOffset); + } + printf(" insertRows: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].insertRows); + /* + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); + }else { + printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); + } + */ + printf(" interlaceRows: \033[33m%u\033[0m\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + + if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { + printf(" stable insert interval: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTbls[j].insertInterval); + } + + printf(" disorderRange: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].disorderRange); + printf(" disorderRatio: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].disorderRatio); + printf(" maxSqlLen: \033[33m%"PRIu64"\033[0m\n", + g_Dbs.db[i].superTbls[j].maxSqlLen); + printf(" timeStampStep: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].timeStampStep); + printf(" startTimestamp: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].startTimestamp); + printf(" sampleFormat: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sampleFormat); + printf(" sampleFile: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].sampleFile); + printf(" tagsFile: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].tagsFile); + printf(" columnCount: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, 
g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + "binary", 6)) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].columns[k].dataType, + "nchar", 5))) { + printf("column[\033[33m%d\033[0m]:\033[33m%s(%d)\033[0m ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType, + g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + printf("column[%d]:\033[33m%s\033[0m ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + printf("\n"); + + printf(" tagCount: \033[33m%d\033[0m\n ", + g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { + printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, + g_Dbs.db[i].superTbls[j].tags[k].dataType, + g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + printf("tag[%d]:\033[33m%s\033[0m ", k, + g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + printf("\n"); } - } - fprintf(fp, "\n"); + printf("\n"); } - fprintf(fp, "\n"); - } - SHOW_PARSE_RESULT_END_TO_FILE(fp); -} - -static void printfQueryMeta() { + SHOW_PARSE_RESULT_END(); - SHOW_PARSE_RESULT_START(); - - printf("host: \033[33m%s:%u\033[0m\n", - g_queryInfo.host, g_queryInfo.port); - printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); - printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); - - printf("\n"); - - if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { - printf("specified table query info: \n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.sqlCount); - if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { - printf("specified tbl query times:\n"); - printf(" \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryTimes); - printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", - g_queryInfo.specifiedQueryInfo.queryInterval); - printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); - printf("concurrent: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.concurrent); - printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync"); - printf("interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.specifiedQueryInfo.sql[i]); - } - printf("\n"); - } - - printf("super table query info:\n"); - printf("sqlCount: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.sqlCount); - - if (g_queryInfo.superQueryInfo.sqlCount > 0) { - printf("query interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.queryInterval); - printf("threadCnt: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.threadCnt); - printf("childTblCount: \033[33m%"PRId64"\033[0m\n", - g_queryInfo.superQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", - g_queryInfo.superQueryInfo.sTblName); - printf("stb query times:\033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.queryTimes); - - 
printf("mod: \033[33m%s\033[0m\n", - (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync"); - printf("interval: \033[33m%"PRIu64"\033[0m\n", - g_queryInfo.superQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress); - - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", - i, g_queryInfo.superQueryInfo.sql[i]); - } - printf("\n"); - } - } - - SHOW_PARSE_RESULT_END(); + return 0; } -static char* formatTimestamp(char* buf, int64_t val, int precision) { - time_t tt; - if (precision == TSDB_TIME_PRECISION_NANO) { - tt = (time_t)(val / 1000000000); - } else if (precision == TSDB_TIME_PRECISION_MICRO) { - tt = (time_t)(val / 1000000); - } else { - tt = (time_t)(val / 1000); - } - -/* comment out as it make testcases like select_with_tags.sim fail. - but in windows, this may cause the call to localtime crash if tt < 0, - need to find a better solution. - if (tt < 0) { - tt = 0; - } - */ +static void printfInsertMetaToFile(FILE* fp) { -#ifdef WINDOWS - if (tt < 0) tt = 0; -#endif + SHOW_PARSE_RESULT_START_TO_FILE(fp); - struct tm* ptm = localtime(&tt); - size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); + fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); + fprintf(fp, "user: %s\n", g_Dbs.user); + fprintf(fp, "configDir: %s\n", configDir); + fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); + fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); + fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); + fprintf(fp, "number of records per req: %u\n", g_args.num_of_RPR); + fprintf(fp, "max sql length: %"PRIu64"\n", g_args.max_sql_len); + fprintf(fp, "database count: %d\n", g_Dbs.dbCount); - if (precision == TSDB_TIME_PRECISION_NANO) { - sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); - } else if (precision == TSDB_TIME_PRECISION_MICRO) { - sprintf(buf + pos, ".%06d", (int)(val % 1000000)); - } else { - sprintf(buf + pos, ".%03d", (int)(val % 1000)); - } + for (int i = 0; i < g_Dbs.dbCount; i++) { + fprintf(fp, "database[%d]:\n", i); + fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); + if (0 == g_Dbs.db[i].drop) { + fprintf(fp, " drop: no\n"); + }else { + fprintf(fp, " drop: yes\n"); + } + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + fprintf(fp, " blocks: %d\n", g_Dbs.db[i].dbCfg.blocks); + } + if (g_Dbs.db[i].dbCfg.cache > 0) { + fprintf(fp, " cache: %d\n", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + fprintf(fp, " days: %d\n", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + fprintf(fp, " keep: %d\n", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + fprintf(fp, " replica: %d\n", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + fprintf(fp, " update: %d\n", g_Dbs.db[i].dbCfg.update); + } + if (g_Dbs.db[i].dbCfg.minRows > 0) { + fprintf(fp, " minRows: %d\n", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + fprintf(fp, " maxRows: %d\n", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + fprintf(fp, " comp: %d\n", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + fprintf(fp, " walLevel: %d\n", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + fprintf(fp, " fsync: %d\n", g_Dbs.db[i].dbCfg.fsync); + } + if (g_Dbs.db[i].dbCfg.quorum > 0) { + fprintf(fp, " 
quorum: %d\n", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.precision[0] != 0) { + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", 2)) +#if NANO_SECOND_ENABLED == 1 + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ns", 2)) +#endif + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "us", 2))) { + fprintf(fp, " precision: %s\n", + g_Dbs.db[i].dbCfg.precision); + } else { + fprintf(fp, " precision error: %s\n", + g_Dbs.db[i].dbCfg.precision); + } + } + + fprintf(fp, " super table count: %"PRIu64"\n", + g_Dbs.db[i].superTblCount); + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + fprintf(fp, " super table[%d]:\n", j); + + fprintf(fp, " stbName: %s\n", + g_Dbs.db[i].superTbls[j].sTblName); + + if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "no"); + } else if (AUTO_CREATE_SUBTBL + == g_Dbs.db[i].superTbls[j].autoCreateTable) { + fprintf(fp, " autoCreateTable: %s\n", "yes"); + } else { + fprintf(fp, " autoCreateTable: %s\n", "error"); + } + + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "no"); + } else if (TBL_ALREADY_EXISTS + == g_Dbs.db[i].superTbls[j].childTblExists) { + fprintf(fp, " childTblExists: %s\n", "yes"); + } else { + fprintf(fp, " childTblExists: %s\n", "error"); + } + + fprintf(fp, " childTblCount: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].childTblCount); + fprintf(fp, " childTblPrefix: %s\n", + g_Dbs.db[i].superTbls[j].childTblPrefix); + fprintf(fp, " dataSource: %s\n", + g_Dbs.db[i].superTbls[j].dataSource); + fprintf(fp, " iface: %s\n", + (g_Dbs.db[i].superTbls[j].iface==TAOSC_IFACE)?"taosc": + (g_Dbs.db[i].superTbls[j].iface==REST_IFACE)?"rest":"stmt"); + fprintf(fp, " insertRows: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].insertRows); + fprintf(fp, " interlace rows: %u\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { + fprintf(fp, " stable insert interval: %"PRIu64"\n", + g_Dbs.db[i].superTbls[j].insertInterval); + } + /* + if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { + fprintf(fp, " multiThreadWriteOneTbl: no\n"); + }else { + fprintf(fp, " multiThreadWriteOneTbl: yes\n"); + } + */ + fprintf(fp, " interlaceRows: %u\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + fprintf(fp, " disorderRange: %d\n", + g_Dbs.db[i].superTbls[j].disorderRange); + fprintf(fp, " disorderRatio: %d\n", + g_Dbs.db[i].superTbls[j].disorderRatio); + fprintf(fp, " maxSqlLen: %"PRIu64"\n", + g_Dbs.db[i].superTbls[j].maxSqlLen); + + fprintf(fp, " timeStampStep: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", + g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " sampleFormat: %s\n", + g_Dbs.db[i].superTbls[j].sampleFormat); + fprintf(fp, " sampleFile: %s\n", + g_Dbs.db[i].superTbls[j].sampleFile); + fprintf(fp, " tagsFile: %s\n", + g_Dbs.db[i].superTbls[j].tagsFile); + + fprintf(fp, " columnCount: %d\n ", + g_Dbs.db[i].superTbls[j].columnCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); + if ((0 == strncasecmp( + g_Dbs.db[i].superTbls[j].columns[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp( + g_Dbs.db[i].superTbls[j].columns[k].dataType, + "nchar", strlen("nchar")))) { + fprintf(fp, "column[%d]:%s(%d) ", k, + g_Dbs.db[i].superTbls[j].columns[k].dataType, + 
g_Dbs.db[i].superTbls[j].columns[k].dataLen); + } else { + fprintf(fp, "column[%d]:%s ", + k, g_Dbs.db[i].superTbls[j].columns[k].dataType); + } + } + fprintf(fp, "\n"); + + fprintf(fp, " tagCount: %d\n ", + g_Dbs.db[i].superTbls[j].tagCount); + for (int k = 0; k < g_Dbs.db[i].superTbls[j].tagCount; k++) { + //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { + fprintf(fp, "tag[%d]:%s(%d) ", + k, g_Dbs.db[i].superTbls[j].tags[k].dataType, + g_Dbs.db[i].superTbls[j].tags[k].dataLen); + } else { + fprintf(fp, "tag[%d]:%s ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType); + } + } + fprintf(fp, "\n"); + } + fprintf(fp, "\n"); + } + + SHOW_PARSE_RESULT_END_TO_FILE(fp); +} + +static void printfQueryMeta() { + + SHOW_PARSE_RESULT_START(); + + printf("host: \033[33m%s:%u\033[0m\n", + g_queryInfo.host, g_queryInfo.port); + printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); + printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); + + printf("\n"); + + if ((SUBSCRIBE_TEST == g_args.test_mode) || (QUERY_TEST == g_args.test_mode)) { + printf("specified table query info: \n"); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.sqlCount); + if (g_queryInfo.specifiedQueryInfo.sqlCount > 0) { + printf("specified tbl query times:\n"); + printf(" \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryTimes); + printf("query interval: \033[33m%"PRIu64" ms\033[0m\n", + g_queryInfo.specifiedQueryInfo.queryInterval); + printf("top query times:\033[33m%"PRIu64"\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.concurrent); + printf("mod: \033[33m%s\033[0m\n", + (g_queryInfo.specifiedQueryInfo.asyncMode)?"async":"sync"); + printf("interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.specifiedQueryInfo.sql[i]); + } + printf("\n"); + } + + printf("super table query info:\n"); + printf("sqlCount: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.sqlCount); + + if (g_queryInfo.superQueryInfo.sqlCount > 0) { + printf("query interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.queryInterval); + printf("threadCnt: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%"PRId64"\033[0m\n", + g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", + g_queryInfo.superQueryInfo.sTblName); + printf("stb query times:\033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.queryTimes); + + printf("mod: \033[33m%s\033[0m\n", + (g_queryInfo.superQueryInfo.asyncMode)?"async":"sync"); + printf("interval: \033[33m%"PRIu64"\033[0m\n", + g_queryInfo.superQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", + g_queryInfo.superQueryInfo.subscribeKeepProgress); + + for (int i = 0; i < 
g_queryInfo.superQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", + i, g_queryInfo.superQueryInfo.sql[i]); + } + printf("\n"); + } + } + + SHOW_PARSE_RESULT_END(); +} + +static char* formatTimestamp(char* buf, int64_t val, int precision) { + time_t tt; + if (precision == TSDB_TIME_PRECISION_MICRO) { + tt = (time_t)(val / 1000000); +#if NANO_SECOND_ENABLED == 1 + } if (precision == TSDB_TIME_PRECISION_NANO) { + tt = (time_t)(val / 1000000000); +#endif + } else { + tt = (time_t)(val / 1000); + } + + /* comment out as it make testcases like select_with_tags.sim fail. + but in windows, this may cause the call to localtime crash if tt < 0, + need to find a better solution. + if (tt < 0) { + tt = 0; + } + */ + +#ifdef WINDOWS + if (tt < 0) tt = 0; +#endif + + struct tm* ptm = localtime(&tt); + size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm); + + if (precision == TSDB_TIME_PRECISION_MICRO) { + sprintf(buf + pos, ".%06d", (int)(val % 1000000)); +#if NANO_SECOND_ENABLED == 1 + } else if (precision == TSDB_TIME_PRECISION_NANO) { + sprintf(buf + pos, ".%09d", (int)(val % 1000000000)); +#endif + } else { + sprintf(buf + pos, ".%03d", (int)(val % 1000)); + } - return buf; + return buf; } static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { - if (val == NULL) { - fprintf(fp, "%s", TSDB_DATA_NULL_STR); - return; - } - - char buf[TSDB_MAX_BYTES_PER_ROW]; - switch (field->type) { - case TSDB_DATA_TYPE_BOOL: - fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - fprintf(fp, "%d", *((int8_t *)val)); - break; - case TSDB_DATA_TYPE_SMALLINT: - fprintf(fp, "%d", *((int16_t *)val)); - break; - case TSDB_DATA_TYPE_INT: - fprintf(fp, "%d", *((int32_t *)val)); - break; - case TSDB_DATA_TYPE_BIGINT: - fprintf(fp, "%" PRId64, *((int64_t *)val)); - break; - case TSDB_DATA_TYPE_FLOAT: - fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); - break; - case TSDB_DATA_TYPE_DOUBLE: - fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); - break; - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(buf, val, length); - buf[length] = 0; - fprintf(fp, "\'%s\'", buf); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - formatTimestamp(buf, *(int64_t*)val, precision); - fprintf(fp, "'%s'", buf); - break; - default: - break; - } + if (val == NULL) { + fprintf(fp, "%s", TSDB_DATA_NULL_STR); + return; + } + + char buf[TSDB_MAX_BYTES_PER_ROW]; + switch (field->type) { + case TSDB_DATA_TYPE_BOOL: + fprintf(fp, "%d", ((((int32_t)(*((char *)val))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + fprintf(fp, "%d", *((int8_t *)val)); + break; + case TSDB_DATA_TYPE_SMALLINT: + fprintf(fp, "%d", *((int16_t *)val)); + break; + case TSDB_DATA_TYPE_INT: + fprintf(fp, "%d", *((int32_t *)val)); + break; + case TSDB_DATA_TYPE_BIGINT: + fprintf(fp, "%" PRId64, *((int64_t *)val)); + break; + case TSDB_DATA_TYPE_FLOAT: + fprintf(fp, "%.5f", GET_FLOAT_VAL(val)); + break; + case TSDB_DATA_TYPE_DOUBLE: + fprintf(fp, "%.9f", GET_DOUBLE_VAL(val)); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(buf, val, length); + buf[length] = 0; + fprintf(fp, "\'%s\'", buf); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + formatTimestamp(buf, *(int64_t*)val, precision); + fprintf(fp, "'%s'", buf); + break; + default: + break; + } } static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { - TAOS_ROW row = taos_fetch_row(tres); - if (row == NULL) { - return 0; - } - - FILE* fp = fopen(fname, "at"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file: %s\n", __func__, __LINE__, fname); - return -1; - } - - int num_fields = taos_num_fields(tres); - TAOS_FIELD *fields = taos_fetch_fields(tres); - int precision = taos_result_precision(tres); + TAOS_ROW row = taos_fetch_row(tres); + if (row == NULL) { + return 0; + } - for (int col = 0; col < num_fields; col++) { - if (col > 0) { - fprintf(fp, ","); + FILE* fp = fopen(fname, "at"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file: %s\n", + __func__, __LINE__, fname); + return -1; } - fprintf(fp, "%s", fields[col].name); - } - fputc('\n', fp); - int numOfRows = 0; - do { - int32_t* length = taos_fetch_lengths(tres); - for (int i = 0; i < num_fields; i++) { - if (i > 0) { - fputc(',', fp); - } - xDumpFieldToFile(fp, (const char*)row[i], fields +i, length[i], precision); + int num_fields = taos_num_fields(tres); + TAOS_FIELD *fields = taos_fetch_fields(tres); + int precision = taos_result_precision(tres); + + for (int col = 0; col < num_fields; col++) { + if (col > 0) { + fprintf(fp, ","); + } + fprintf(fp, "%s", fields[col].name); } fputc('\n', fp); - numOfRows++; - row = taos_fetch_row(tres); - } while( row != NULL); + int numOfRows = 0; + do { + int32_t* length = taos_fetch_lengths(tres); + for (int i = 0; i < num_fields; i++) { + if (i > 0) { + fputc(',', fp); + } + xDumpFieldToFile(fp, + (const char*)row[i], fields +i, length[i], precision); + } + fputc('\n', fp); + + numOfRows++; + row = taos_fetch_row(tres); + } while( row != NULL); - fclose(fp); + fclose(fp); - return numOfRows; + return numOfRows; } static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; - res = taos_query(taos, "show databases;"); - int32_t code = taos_errno(res); + res = taos_query(taos, "show databases;"); + int32_t code = taos_errno(res); - if (code != 0) { - errorPrint( "failed to run , reason: %s\n", taos_errstr(res)); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); - - while((row = taos_fetch_row(res)) != NULL) { - // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { - continue; - } - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count); - return -1; - } - - tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - 
fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - formatTimestamp(dbInfos[count]->create_time, - *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], - TSDB_TIME_PRECISION_MILLI); - dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], - fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); - - count++; - if (count > MAX_DATABASE_COUNT) { - errorPrint("%s() LN%d, The database count overflow than %d\n", - __func__, __LINE__, MAX_DATABASE_COUNT); - break; - } - } - - return count; + if (code != 0) { + errorPrint( "failed to run , reason: %s\n", + taos_errstr(res)); + return -1; + } + + TAOS_FIELD *fields = taos_fetch_fields(res); + + while((row = taos_fetch_row(res)) != NULL) { + // sys database name : 'log' + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { + continue; + } + + dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (dbInfos[count] == NULL) { + errorPrint( "failed to allocate memory for some dbInfo[%d]\n", count); + return -1; + } + + tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes); + formatTimestamp(dbInfos[count]->create_time, + *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], + TSDB_TIME_PRECISION_MILLI); + dbInfos[count]->ntables = *((int64_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->comp = (int8_t)(*((int8_t 
*)row[TSDB_SHOW_DB_COMP_INDEX])); + dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + + tstrncpy(dbInfos[count]->precision, + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); + tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], + fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); + + count++; + if (count > MAX_DATABASE_COUNT) { + errorPrint("%s() LN%d, The database count overflow than %d\n", + __func__, __LINE__, MAX_DATABASE_COUNT); + break; + } + } + + return count; } static void printfDbInfoForQueryToFile( char* filename, SDbInfo* dbInfos, int index) { - if (filename[0] == 0) - return; + if (filename[0] == 0) + return; - FILE *fp = fopen(filename, "at"); - if (fp == NULL) { - errorPrint( "failed to open file: %s\n", filename); - return; - } - - fprintf(fp, "================ database[%d] ================\n", index); - fprintf(fp, "name: %s\n", dbInfos->name); - fprintf(fp, "created_time: %s\n", dbInfos->create_time); - fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); - fprintf(fp, "replica: %d\n", dbInfos->replica); - fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); - fprintf(fp, "cache(MB): %d\n", dbInfos->cache); - fprintf(fp, "blocks: %d\n", dbInfos->blocks); - fprintf(fp, "minrows: %d\n", dbInfos->minrows); - fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); - fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); - fprintf(fp, "fsync: %d\n", dbInfos->fsync); - fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); - fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); - fprintf(fp, "\n"); - - fclose(fp); + FILE *fp = fopen(filename, "at"); + if (fp == NULL) { + errorPrint( "failed to open file: %s\n", filename); + return; + } + + fprintf(fp, "================ database[%d] ================\n", index); + fprintf(fp, "name: %s\n", dbInfos->name); + fprintf(fp, "created_time: %s\n", dbInfos->create_time); + fprintf(fp, "ntables: %"PRId64"\n", dbInfos->ntables); + fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); + fprintf(fp, "replica: %d\n", dbInfos->replica); + fprintf(fp, "quorum: %d\n", dbInfos->quorum); + fprintf(fp, "days: %d\n", dbInfos->days); + fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); + fprintf(fp, "cache(MB): %d\n", dbInfos->cache); + fprintf(fp, "blocks: %d\n", dbInfos->blocks); + fprintf(fp, "minrows: %d\n", dbInfos->minrows); + fprintf(fp, "maxrows: %d\n", dbInfos->maxrows); + fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); + fprintf(fp, "fsync: %d\n", dbInfos->fsync); + fprintf(fp, "comp: %d\n", dbInfos->comp); + fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); + fprintf(fp, "precision: %s\n", dbInfos->precision); + fprintf(fp, "update: %d\n", dbInfos->update); + fprintf(fp, "status: %s\n", dbInfos->status); + fprintf(fp, "\n"); + + fclose(fp); } static void printfQuerySystemInfo(TAOS * taos) { - char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; - char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; - TAOS_RES* res; - - time_t t; - struct tm* lt; - time(&t); - lt = localtime(&t); - snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", - lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, 
lt->tm_min, - lt->tm_sec); - - // show variables - res = taos_query(taos, "show variables;"); - //fetchResult(res, filename); - xDumpResultToFile(filename, res); - - // show dnodes - res = taos_query(taos, "show dnodes;"); - xDumpResultToFile(filename, res); - //fetchResult(res, filename); - - // show databases - res = taos_query(taos, "show databases;"); - SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); - return; - } - int dbCount = getDbFromServer(taos, dbInfos); - if (dbCount <= 0) { - free(dbInfos); - return; - } - - for (int i = 0; i < dbCount; i++) { - // printf database info - printfDbInfoForQueryToFile(filename, dbInfos[i], i); - - // show db.vgroups - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); - res = taos_query(taos, buffer); + char filename[MAX_QUERY_SQL_LENGTH+1] = {0}; + char buffer[MAX_QUERY_SQL_LENGTH+1] = {0}; + TAOS_RES* res; + + time_t t; + struct tm* lt; + time(&t); + lt = localtime(&t); + snprintf(filename, MAX_QUERY_SQL_LENGTH, "querySystemInfo-%d-%d-%d %d:%d:%d", + lt->tm_year+1900, lt->tm_mon, lt->tm_mday, lt->tm_hour, lt->tm_min, + lt->tm_sec); + + // show variables + res = taos_query(taos, "show variables;"); + //fetchResult(res, filename); xDumpResultToFile(filename, res); - // show db.stables - snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); - res = taos_query(taos, buffer); + // show dnodes + res = taos_query(taos, "show dnodes;"); xDumpResultToFile(filename, res); + //fetchResult(res, filename); + + // show databases + res = taos_query(taos, "show databases;"); + SDbInfo** dbInfos = (SDbInfo **)calloc(MAX_DATABASE_COUNT, sizeof(SDbInfo *)); + if (dbInfos == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); + return; + } + int dbCount = getDbFromServer(taos, dbInfos); + if (dbCount <= 0) { + free(dbInfos); + return; + } + + for (int i = 0; i < dbCount; i++) { + // printf database info + printfDbInfoForQueryToFile(filename, dbInfos[i], i); + + // show db.vgroups + snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); + res = taos_query(taos, buffer); + xDumpResultToFile(filename, res); - free(dbInfos[i]); - } + // show db.stables + snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); + res = taos_query(taos, buffer); + xDumpResultToFile(filename, res); + free(dbInfos[i]); + } - free(dbInfos); + free(dbInfos); } static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port, @@ -2154,24 +2235,24 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port request_buf = malloc(req_buf_len); if (NULL == request_buf) { - errorPrint("%s", "ERROR, cannot allocate memory.\n"); - exit(EXIT_FAILURE); + errorPrint("%s", "ERROR, cannot allocate memory.\n"); + exit(EXIT_FAILURE); } char userpass_buf[INPUT_BUF_LEN]; int mod_table[] = {0, 2, 1}; static char base64[] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', - 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', - 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', - 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', - 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', - 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', - 'w', 'x', 'y', 'z', '0', '1', '2', '3', - '4', '5', '6', '7', '8', '9', '+', '/'}; + 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', + 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', + 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 
+ 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', + 'w', 'x', 'y', 'z', '0', '1', '2', '3', + '4', '5', '6', '7', '8', '9', '+', '/'}; snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s", - g_Dbs.user, g_Dbs.password); + g_Dbs.user, g_Dbs.password); size_t userpass_buf_len = strlen(userpass_buf); size_t encoded_len = 4 * ((userpass_buf_len +2) / 3); @@ -2203,22 +2284,22 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port memset(base64_buf, 0, INPUT_BUF_LEN); for (int n = 0, m = 0; n < userpass_buf_len;) { - uint32_t oct_a = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t oct_b = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t oct_c = n < userpass_buf_len ? - (unsigned char) userpass_buf[n++]:0; - uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; + uint32_t oct_a = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t oct_b = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t oct_c = n < userpass_buf_len ? + (unsigned char) userpass_buf[n++]:0; + uint32_t triple = (oct_a << 0x10) + (oct_b << 0x08) + oct_c; - base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f]; - base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 3* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 2* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 1* 6) & 0x3f]; + base64_buf[m++] = base64[(triple >> 0* 6) & 0x3f]; } for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) - base64_buf[encoded_len - 1 - l] = '='; + base64_buf[encoded_len - 1 - l] = '='; debugPrint("%s() LN%d: auth string base64 encoded: %s\n", __func__, __LINE__, base64_buf); @@ -2276,7 +2357,7 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port printf("Response:\n%s\n", response_buf); if (strlen(pThreadInfo->filePath) > 0) { - appendResultBufToFile(response_buf, pThreadInfo); + appendResultBufToFile(response_buf, pThreadInfo); } free(request_buf); @@ -2291,167 +2372,186 @@ static int postProceSql(char *host, struct sockaddr_in *pServAddr, uint16_t port } static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - errorPrint("%s() LN%d, calloc failed! size:%d\n", - __func__, __LINE__, TSDB_MAX_SQL_LEN+1); - return NULL; - } + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + errorPrint("%s() LN%d, calloc failed! size:%d\n", + __func__, __LINE__, TSDB_MAX_SQL_LEN+1); + return NULL; + } - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "(%s)", stbInfo->tagDataBuf + stbInfo->lenOfTagOfOneRow * tagUsePos); - return dataBuf; + return dataBuf; } -static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) { - char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); - if (NULL == dataBuf) { - printf("calloc failed! 
size:%d\n", TSDB_MAX_SQL_LEN+1); - return NULL; - } - - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); - for (int i = 0; i < stbInfo->tagCount; i++) { - if ((0 == strncasecmp(stbInfo->tags[i].dataType, "binary", strlen("binary"))) - || (0 == strncasecmp(stbInfo->tags[i].dataType, "nchar", strlen("nchar")))) { - if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { - printf("binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - tmfree(dataBuf); +static char *generateBinaryNCharTagValues(int64_t tableSeq, uint32_t len) +{ + char* buf = (char*)calloc(len, 1); + if (NULL == buf) { + printf("calloc failed! size:%d\n", len); return NULL; - } + } + + if (tableSeq % 2) { + tstrncpy(buf, "beijing", len); + } else { + tstrncpy(buf, "shanghai", len); + } + //rand_string(buf, stbInfo->tags[i].dataLen); + + return buf; +} - int tagBufLen = stbInfo->tags[i].dataLen + 1; - char* buf = (char*)calloc(tagBufLen, 1); - if (NULL == buf) { - printf("calloc failed! size:%d\n", stbInfo->tags[i].dataLen); - tmfree(dataBuf); +static char* generateTagValuesForStb(SSuperTable* stbInfo, int64_t tableSeq) { + char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); + if (NULL == dataBuf) { + printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); return NULL; - } - - if (tableSeq % 2) { - tstrncpy(buf, "beijing", tagBufLen); - } else { - tstrncpy(buf, "shanghai", tagBufLen); - } - //rand_string(buf, stbInfo->tags[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "\'%s\', ", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "int", strlen("int"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", tableSeq); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "bigint", strlen("bigint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64", ", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "float", strlen("float"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f, ", rand_float()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "double", strlen("double"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%f, ", rand_double()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "smallint", strlen("smallint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "tinyint", strlen("tinyint"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "bool", strlen("bool"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_bool()); - } else if (0 == strncasecmp(stbInfo->tags[i].dataType, - "timestamp", strlen("timestamp"))) { - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%"PRId64", ", rand_bigint()); - } else { - printf("No support data type: %s\n", stbInfo->tags[i].dataType); - tmfree(dataBuf); - return NULL; } - } - dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); - return dataBuf; + int dataLen = 0; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "("); + for (int i = 0; i < stbInfo->tagCount; i++) { + if ((0 == strncasecmp(stbInfo->tags[i].dataType, + "binary", strlen("binary"))) + || (0 == 
strncasecmp(stbInfo->tags[i].dataType, + "nchar", strlen("nchar")))) { + if (stbInfo->tags[i].dataLen > TSDB_MAX_BINARY_LEN) { + printf("binary or nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + tmfree(dataBuf); + return NULL; + } + + int32_t tagBufLen = stbInfo->tags[i].dataLen + 1; + char *buf = generateBinaryNCharTagValues(tableSeq, tagBufLen); + if (NULL == buf) { + tmfree(dataBuf); + return NULL; + } + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "\'%s\',", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "int", strlen("int"))) { + if ((g_args.demo_mode) && (i == 0)) { + dataLen += snprintf(dataBuf + dataLen, + TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", tableSeq % 10); + } else { + dataLen += snprintf(dataBuf + dataLen, + TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", tableSeq); + } + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "bigint", strlen("bigint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "float", strlen("float"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%f,", rand_float()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "double", strlen("double"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%f,", rand_double()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "smallint", strlen("smallint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "tinyint", strlen("tinyint"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "bool", strlen("bool"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%d,", rand_bool()); + } else if (0 == strncasecmp(stbInfo->tags[i].dataType, + "timestamp", strlen("timestamp"))) { + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, + "%"PRId64",", rand_bigint()); + } else { + errorPrint("No support data type: %s\n", stbInfo->tags[i].dataType); + tmfree(dataBuf); + return NULL; + } + } + + dataLen -= 1; + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); + return dataBuf; } static int calcRowLen(SSuperTable* superTbls) { - int colIndex; - int lenOfOneRow = 0; - - for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { - char* dataType = superTbls->columns[colIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfOneRow += 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfOneRow += 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfOneRow += 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfOneRow += 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfOneRow += 42; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - lenOfOneRow += 21; - } else { - printf("get error data type : %s\n", dataType); - exit(-1); - } - } - - superTbls->lenOfOneRow = 
lenOfOneRow + 20; // timestamp - - int tagIndex; - int lenOfTagOfOneRow = 0; - for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { - char* dataType = superTbls->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; - } else { - printf("get error tag type : %s\n", dataType); - exit(-1); + int colIndex; + int lenOfOneRow = 0; + + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { + char* dataType = superTbls->columns[colIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + lenOfOneRow += 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + lenOfOneRow += 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + lenOfOneRow += 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + lenOfOneRow += 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + lenOfOneRow += 42; + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + lenOfOneRow += 21; + } else { + printf("get error data type : %s\n", dataType); + exit(-1); + } + } + + superTbls->lenOfOneRow = lenOfOneRow + 20; // timestamp + + int tagIndex; + int lenOfTagOfOneRow = 0; + for (tagIndex = 0; tagIndex < superTbls->tagCount; tagIndex++) { + char* dataType = superTbls->tags[tagIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + lenOfTagOfOneRow += 
superTbls->tags[tagIndex].dataLen + 42; + } else { + printf("get error tag type : %s\n", dataType); + exit(-1); + } } - } - superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; + superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - return 0; + return 0; } @@ -2459,77 +2559,84 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* dbName, char* sTblName, char** childTblNameOfSuperTbl, int64_t* childTblCountOfSuperTbl, int64_t limit, uint64_t offset) { - char command[BUFFER_SIZE] = "\0"; - char limitBuf[100] = "\0"; + char command[BUFFER_SIZE] = "\0"; + char limitBuf[100] = "\0"; - TAOS_RES * res; - TAOS_ROW row = NULL; + TAOS_RES * res; + TAOS_ROW row = NULL; - char* childTblName = *childTblNameOfSuperTbl; + char* childTblName = *childTblNameOfSuperTbl; - if (offset >= 0) { - snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", - limit, offset); - } + if (offset >= 0) { + snprintf(limitBuf, 100, " limit %"PRId64" offset %"PRIu64"", + limit, offset); + } - //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", - dbName, sTblName, limitBuf); + //get all child table name use cmd: select tbname from superTblName; + snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", + dbName, sTblName, limitBuf); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - taos_free_result(res); - taos_close(taos); - errorPrint("%s() LN%d, failed to run command %s\n", - __func__, __LINE__, command); - exit(-1); - } - - int64_t childTblCount = (limit < 0)?10000:limit; - int64_t count = 0; - if (childTblName == NULL) { - childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); - if (NULL == childTblName) { - taos_free_result(res); - taos_close(taos); - errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); - exit(-1); - } - } - - char* pTblName = childTblName; - while((row = taos_fetch_row(res)) != NULL) { - int32_t* len = taos_fetch_lengths(res); - tstrncpy(pTblName, (char *)row[0], len[0]+1); - //printf("==== sub table name: %s\n", pTblName); - count++; - if (count >= childTblCount - 1) { - char *tmp = realloc(childTblName, - (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); - if (tmp != NULL) { - childTblName = tmp; - childTblCount = (int)(childTblCount*1.5); - memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, - (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); - } else { - // exit, if allocate more memory failed - errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", - __func__, __LINE__, dbName, sTblName); - tmfree(childTblName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { taos_free_result(res); taos_close(taos); + errorPrint("%s() LN%d, failed to run command %s\n", + __func__, __LINE__, command); exit(-1); - } } - pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; - } - *childTblCountOfSuperTbl = count; - *childTblNameOfSuperTbl = childTblName; + int64_t childTblCount = (limit < 0)?10000:limit; + int64_t count = 0; + if (childTblName == NULL) { + childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + if (NULL == childTblName) { + taos_free_result(res); + taos_close(taos); + errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); + exit(-1); + } + } + + char* pTblName = childTblName; + while((row = taos_fetch_row(res)) != NULL) { + int32_t* len = taos_fetch_lengths(res); + + if (0 == strlen((char *)row[0])) { + errorPrint("%s() 
LN%d, No.%"PRId64" table return empty name\n", + __func__, __LINE__, count); + exit(-1); + } + + tstrncpy(pTblName, (char *)row[0], len[0]+1); + //printf("==== sub table name: %s\n", pTblName); + count++; + if (count >= childTblCount - 1) { + char *tmp = realloc(childTblName, + (size_t)childTblCount*1.5*TSDB_TABLE_NAME_LEN+1); + if (tmp != NULL) { + childTblName = tmp; + childTblCount = (int)(childTblCount*1.5); + memset(childTblName + count*TSDB_TABLE_NAME_LEN, 0, + (size_t)((childTblCount-count)*TSDB_TABLE_NAME_LEN)); + } else { + // exit, if allocate more memory failed + errorPrint("%s() LN%d, realloc fail for save child table name of %s.%s\n", + __func__, __LINE__, dbName, sTblName); + tmfree(childTblName); + taos_free_result(res); + taos_close(taos); + exit(-1); + } + } + pTblName = childTblName + count * TSDB_TABLE_NAME_LEN; + } + + *childTblCountOfSuperTbl = count; + *childTblNameOfSuperTbl = childTblName; - taos_free_result(res); - return 0; + taos_free_result(res); + return 0; } static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, @@ -2544,541 +2651,580 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { - char command[BUFFER_SIZE] = "\0"; - TAOS_RES * res; - TAOS_ROW row = NULL; - int count = 0; - - //get schema use cmd: describe superTblName; - snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); - res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - printf("failed to run command %s\n", command); + char command[BUFFER_SIZE] = "\0"; + TAOS_RES * res; + TAOS_ROW row = NULL; + int count = 0; + + //get schema use cmd: describe superTblName; + snprintf(command, BUFFER_SIZE, "describe %s.%s", dbName, superTbls->sTblName); + res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + printf("failed to run command %s\n", command); + taos_free_result(res); + return -1; + } + + int tagIndex = 0; + int columnIndex = 0; + TAOS_FIELD *fields = taos_fetch_fields(res); + while((row = taos_fetch_row(res)) != NULL) { + if (0 == count) { + count++; + continue; + } + + if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { + tstrncpy(superTbls->tags[tagIndex].field, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + tstrncpy(superTbls->tags[tagIndex].dataType, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + superTbls->tags[tagIndex].dataLen = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(superTbls->tags[tagIndex].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(NOTE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); + tagIndex++; + } else { + tstrncpy(superTbls->columns[columnIndex].field, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); + tstrncpy(superTbls->columns[columnIndex].dataType, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(DATATYPE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + superTbls->columns[columnIndex].dataLen = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(superTbls->columns[columnIndex].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(NOTE_BUFF_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes) + 1); + columnIndex++; + } + count++; + } + + superTbls->columnCount = columnIndex; 
+ superTbls->tagCount = tagIndex; taos_free_result(res); - return -1; - } - - int tagIndex = 0; - int columnIndex = 0; - TAOS_FIELD *fields = taos_fetch_fields(res); - while((row = taos_fetch_row(res)) != NULL) { - if (0 == count) { - count++; - continue; - } - - if (strcmp((char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], "TAG") == 0) { - tstrncpy(superTbls->tags[tagIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - tstrncpy(superTbls->tags[tagIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - superTbls->tags[tagIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->tags[tagIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - tagIndex++; - } else { - tstrncpy(superTbls->columns[columnIndex].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - tstrncpy(superTbls->columns[columnIndex].dataType, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - superTbls->columns[columnIndex].dataLen = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(superTbls->columns[columnIndex].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - columnIndex++; - } - count++; - } - - superTbls->columnCount = columnIndex; - superTbls->tagCount = tagIndex; - taos_free_result(res); - - calcRowLen(superTbls); -/* - if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { + calcRowLen(superTbls); + + /* + if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { //get all child table name use cmd: select tbname from superTblName; int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); - return -1; + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + return -1; } getAllChildNameOfSuperTable(taos, dbName, - superTbls->sTblName, - &superTbls->childTblName, - &superTbls->childTblCount); - } - */ - return 0; + superTbls->sTblName, + &superTbls->childTblName, + &superTbls->childTblCount); + } + */ + return 0; } static int createSuperTable( TAOS * taos, char* dbName, SSuperTable* superTbl) { - char command[BUFFER_SIZE] = "\0"; + char command[BUFFER_SIZE] = "\0"; - char cols[STRING_LEN] = "\0"; - int colIndex; - int len = 0; + char cols[COL_BUFFER_LEN] = "\0"; + int colIndex; + int len = 0; - int lenOfOneRow = 0; + int lenOfOneRow = 0; - if (superTbl->columnCount == 0) { - errorPrint("%s() LN%d, super table column count is %d\n", - __func__, __LINE__, superTbl->columnCount); - return -1; - } - - for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { - char* dataType = superTbl->columns[colIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "BINARY", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(cols + len, STRING_LEN - len, - ", col%d %s(%d)", colIndex, "NCHAR", - superTbl->columns[colIndex].dataLen); - lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, 
", col%d %s", colIndex, "INT"); - lenOfOneRow += 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BIGINT"); - lenOfOneRow += 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "SMALLINT"); - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TINYINT"); - lenOfOneRow += 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "BOOL"); - lenOfOneRow += 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "FLOAT"); - lenOfOneRow += 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "DOUBLE"); - lenOfOneRow += 42; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { - len += snprintf(cols + len, STRING_LEN - len, ", col%d %s", colIndex, "TIMESTAMP"); - lenOfOneRow += 21; - } else { - taos_close(taos); - errorPrint("%s() LN%d, config error data type : %s\n", - __func__, __LINE__, dataType); - exit(-1); + if (superTbl->columnCount == 0) { + errorPrint("%s() LN%d, super table column count is %d\n", + __func__, __LINE__, superTbl->columnCount); + return -1; } - } - - superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp - //printf("%s.%s column count:%d, column length:%d\n\n", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbl[j].sTblName, g_Dbs.db[i].superTbl[j].columnCount, lenOfOneRow); - - // save for creating child table - superTbl->colsOfCreateChildTable = (char*)calloc(len+20, 1); - if (NULL == superTbl->colsOfCreateChildTable) { - errorPrint("%s() LN%d, Failed when calloc, size:%d", - __func__, __LINE__, len+1); - taos_close(taos); - exit(-1); - } - snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); - verbosePrint("%s() LN%d: %s\n", - __func__, __LINE__, superTbl->colsOfCreateChildTable); + for (colIndex = 0; colIndex < superTbl->columnCount; colIndex++) { + char* dataType = superTbl->columns[colIndex].dataType; + + if (strcasecmp(dataType, "BINARY") == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "BINARY", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ",C%d %s(%d)", colIndex, "NCHAR", + superTbl->columns[colIndex].dataLen); + lenOfOneRow += superTbl->columns[colIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + if ((g_args.demo_mode) && (colIndex == 1)) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, + ", VOLTAGE INT"); + } else { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "INT"); + } + lenOfOneRow += 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "BIGINT"); + lenOfOneRow += 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "SMALLINT"); + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "TINYINT"); + lenOfOneRow += 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(cols + len, 
COL_BUFFER_LEN - len, ",C%d %s", colIndex, "BOOL"); + lenOfOneRow += 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + if (g_args.demo_mode) { + if (colIndex == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", CURRENT FLOAT"); + } else if (colIndex == 2) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ", PHASE FLOAT"); + } + } else { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", colIndex, "FLOAT"); + } - if (superTbl->tagCount == 0) { - errorPrint("%s() LN%d, super table tag count is %d\n", - __func__, __LINE__, superTbl->tagCount); - return -1; - } - - char tags[STRING_LEN] = "\0"; - int tagIndex; - len = 0; - - int lenOfTagOfOneRow = 0; - len += snprintf(tags + len, STRING_LEN - len, "("); - for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { - char* dataType = superTbl->tags[tagIndex].dataType; - - if (strcasecmp(dataType, "BINARY") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, - "BINARY", superTbl->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "NCHAR") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s(%d), ", tagIndex, - "NCHAR", superTbl->tags[tagIndex].dataLen); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; - } else if (strcasecmp(dataType, "INT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "INT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11; - } else if (strcasecmp(dataType, "BIGINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BIGINT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21; - } else if (strcasecmp(dataType, "SMALLINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "SMALLINT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "TINYINT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "TINYINT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4; - } else if (strcasecmp(dataType, "BOOL") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "BOOL"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; - } else if (strcasecmp(dataType, "FLOAT") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "FLOAT"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { - len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, - "DOUBLE"); - lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; - } else { - taos_close(taos); - errorPrint("%s() LN%d, config error tag type : %s\n", - __func__, __LINE__, dataType); - exit(-1); - } - } - - len -= 2; - len += snprintf(tags + len, STRING_LEN - len, ")"); - - superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; - - snprintf(command, BUFFER_SIZE, - "create table if not exists %s.%s (ts timestamp%s) tags %s", - dbName, superTbl->sTblName, cols, tags); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - errorPrint( "create supertable %s failed!\n\n", - superTbl->sTblName); - return -1; - } - debugPrint("create supertable %s success!\n\n", superTbl->sTblName); - return 0; -} + lenOfOneRow += 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "DOUBLE"); + lenOfOneRow += 42; + } else if (strcasecmp(dataType, "TIMESTAMP") == 
0) { + len += snprintf(cols + len, COL_BUFFER_LEN - len, ",C%d %s", + colIndex, "TIMESTAMP"); + lenOfOneRow += 21; + } else { + taos_close(taos); + errorPrint("%s() LN%d, config error data type : %s\n", + __func__, __LINE__, dataType); + exit(-1); + } + } -static int createDatabasesAndStables() { - TAOS * taos = NULL; - int ret = 0; - taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); - if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); - return -1; - } - char command[BUFFER_SIZE] = "\0"; + superTbl->lenOfOneRow = lenOfOneRow + 20; // timestamp - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.db[i].drop) { - sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { - taos_close(taos); - return -1; - } - - int dataLen = 0; - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); - - if (g_Dbs.db[i].dbCfg.blocks > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); - } - if (g_Dbs.db[i].dbCfg.cache > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); - } - if (g_Dbs.db[i].dbCfg.days > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); - } - if (g_Dbs.db[i].dbCfg.keep > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); - } - if (g_Dbs.db[i].dbCfg.quorum > 1) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); - } - if (g_Dbs.db[i].dbCfg.replica > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); - } - if (g_Dbs.db[i].dbCfg.update > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); - } - //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { - // dataLen += snprintf(command + dataLen, - // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); - //} - if (g_Dbs.db[i].dbCfg.minRows > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); - } - if (g_Dbs.db[i].dbCfg.maxRows > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); - } - if (g_Dbs.db[i].dbCfg.comp > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); - } - if (g_Dbs.db[i].dbCfg.walLevel > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); - } - if (g_Dbs.db[i].dbCfg.cacheLast > 0) { - dataLen += snprintf(command + dataLen, - BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); - } - if (g_Dbs.db[i].dbCfg.fsync > 0) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " fsync %d", g_Dbs.db[i].dbCfg.fsync); - } - if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) - || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, - "us", strlen("us")))) { - dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, - " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); - } - - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + // save for creating child table + superTbl->colsOfCreateChildTable = 
(char*)calloc(len+20, 1); + if (NULL == superTbl->colsOfCreateChildTable) { + errorPrint("%s() LN%d, Failed when calloc, size:%d", + __func__, __LINE__, len+1); taos_close(taos); - errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); - return -1; - } - printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); + exit(-1); } - debugPrint("%s() LN%d supertbl count:%"PRIu64"\n", - __func__, __LINE__, g_Dbs.db[i].superTblCount); + snprintf(superTbl->colsOfCreateChildTable, len+20, "(ts timestamp%s)", cols); + verbosePrint("%s() LN%d: %s\n", + __func__, __LINE__, superTbl->colsOfCreateChildTable); - int validStbCount = 0; + if (superTbl->tagCount == 0) { + errorPrint("%s() LN%d, super table tag count is %d\n", + __func__, __LINE__, superTbl->tagCount); + return -1; + } - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, - g_Dbs.db[i].superTbls[j].sTblName); - ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); + char tags[TSDB_MAX_TAGS_LEN] = "\0"; + int tagIndex; + len = 0; - if ((ret != 0) || (g_Dbs.db[i].drop)) { - ret = createSuperTable(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); + int lenOfTagOfOneRow = 0; + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, "("); + for (tagIndex = 0; tagIndex < superTbl->tagCount; tagIndex++) { + char* dataType = superTbl->tags[tagIndex].dataType; - if (0 != ret) { - errorPrint("create super table %"PRIu64" failed!\n\n", j); - continue; + if (strcasecmp(dataType, "BINARY") == 0) { + if ((g_args.demo_mode) && (tagIndex == 1)) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "location BINARY(%d),", + superTbl->tags[tagIndex].dataLen); + } else { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s(%d),", tagIndex, "BINARY", + superTbl->tags[tagIndex].dataLen); + } + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "NCHAR") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s(%d),", tagIndex, + "NCHAR", superTbl->tags[tagIndex].dataLen); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 3; + } else if (strcasecmp(dataType, "INT") == 0) { + if ((g_args.demo_mode) && (tagIndex == 0)) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "groupId INT, "); + } else { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "INT"); + } + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 11; + } else if (strcasecmp(dataType, "BIGINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BIGINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 21; + } else if (strcasecmp(dataType, "SMALLINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "SMALLINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "TINYINT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "TINYINT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 4; + } else if (strcasecmp(dataType, "BOOL") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "BOOL"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 6; + } else if (strcasecmp(dataType, "FLOAT") == 0) { + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "FLOAT"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 22; + } else if (strcasecmp(dataType, "DOUBLE") == 0) { + len += 
snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, + "T%d %s,", tagIndex, "DOUBLE"); + lenOfTagOfOneRow += superTbl->tags[tagIndex].dataLen + 42; + } else { + taos_close(taos); + errorPrint("%s() LN%d, config error tag type : %s\n", + __func__, __LINE__, dataType); + exit(-1); } - } - - ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); - if (0 != ret) { - errorPrint("\nget super table %s.%s info failed!\n\n", - g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); - continue; - } - - validStbCount ++; } - g_Dbs.db[i].superTblCount = validStbCount; - } + len -= 1; + len += snprintf(tags + len, TSDB_MAX_TAGS_LEN - len, ")"); - taos_close(taos); - return 0; + superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow; + + snprintf(command, BUFFER_SIZE, + "create table if not exists %s.%s (ts timestamp%s) tags %s", + dbName, superTbl->sTblName, cols, tags); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + errorPrint( "create supertable %s failed!\n\n", + superTbl->sTblName); + return -1; + } + debugPrint("create supertable %s success!\n\n", superTbl->sTblName); + return 0; } -static void* createTable(void *sarg) -{ - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - - uint64_t lastPrintTime = taosGetTimestampMs(); - - int buff_len; - buff_len = BUFFER_SIZE / 8; - - pThreadInfo->buffer = calloc(buff_len, 1); - if (pThreadInfo->buffer == NULL) { - errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); - exit(-1); - } - - int len = 0; - int batchNum = 0; - - verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->start_table_from, pThreadInfo->end_table_to); - - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - if (0 == g_Dbs.use_metric) { - snprintf(pThreadInfo->buffer, buff_len, - "create table if not exists %s.%s%"PRIu64" %s;", - pThreadInfo->db_name, - g_args.tb_prefix, i, - pThreadInfo->cols); - } else { - if (superTblInfo == NULL) { - errorPrint("%s() LN%d, use metric, but super table info is NULL\n", - __func__, __LINE__); - free(pThreadInfo->buffer); - exit(-1); - } else { - if (0 == len) { - batchNum = 0; - memset(pThreadInfo->buffer, 0, buff_len); - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, "create table "); - } - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo, i); - } else { - tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - free(pThreadInfo->buffer); - return NULL; +static int createDatabasesAndStables() { + TAOS * taos = NULL; + int ret = 0; + taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); + if (taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + return -1; + } + char command[BUFFER_SIZE] = "\0"; + + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.db[i].drop) { + sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + taos_close(taos); + return -1; + } + + int dataLen = 0; + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); + + if (g_Dbs.db[i].dbCfg.blocks > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " blocks %d", g_Dbs.db[i].dbCfg.blocks); + } + if 
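/*
 * Illustrative sketch: the shape of the statement assembled by createSuperTable()
 * above in demo mode, where columns 0/1/2 become CURRENT/VOLTAGE/PHASE and tags
 * 0/1 become groupId/location. The database name, super table name and the
 * BINARY length passed in below are placeholders for the configured values.
 */
#include <stdio.h>

static void sketchDemoCreateStable(char *buf, size_t size,
                                   const char *db, const char *stb,
                                   int locationLen) {
    snprintf(buf, size,
             "create table if not exists %s.%s"
             " (ts timestamp, CURRENT FLOAT, VOLTAGE INT, PHASE FLOAT)"
             " tags (groupId INT, location BINARY(%d))",
             db, stb, locationLen);
}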
(g_Dbs.db[i].dbCfg.cache > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " cache %d", g_Dbs.db[i].dbCfg.cache); + } + if (g_Dbs.db[i].dbCfg.days > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " days %d", g_Dbs.db[i].dbCfg.days); + } + if (g_Dbs.db[i].dbCfg.keep > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " keep %d", g_Dbs.db[i].dbCfg.keep); + } + if (g_Dbs.db[i].dbCfg.quorum > 1) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " quorum %d", g_Dbs.db[i].dbCfg.quorum); + } + if (g_Dbs.db[i].dbCfg.replica > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " replica %d", g_Dbs.db[i].dbCfg.replica); + } + if (g_Dbs.db[i].dbCfg.update > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " update %d", g_Dbs.db[i].dbCfg.update); + } + //if (g_Dbs.db[i].dbCfg.maxtablesPerVnode > 0) { + // dataLen += snprintf(command + dataLen, + // BUFFER_SIZE - dataLen, "tables %d ", g_Dbs.db[i].dbCfg.maxtablesPerVnode); + //} + if (g_Dbs.db[i].dbCfg.minRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " minrows %d", g_Dbs.db[i].dbCfg.minRows); + } + if (g_Dbs.db[i].dbCfg.maxRows > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " maxrows %d", g_Dbs.db[i].dbCfg.maxRows); + } + if (g_Dbs.db[i].dbCfg.comp > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " comp %d", g_Dbs.db[i].dbCfg.comp); + } + if (g_Dbs.db[i].dbCfg.walLevel > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " wal %d", g_Dbs.db[i].dbCfg.walLevel); + } + if (g_Dbs.db[i].dbCfg.cacheLast > 0) { + dataLen += snprintf(command + dataLen, + BUFFER_SIZE - dataLen, " cachelast %d", g_Dbs.db[i].dbCfg.cacheLast); + } + if (g_Dbs.db[i].dbCfg.fsync > 0) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + " fsync %d", g_Dbs.db[i].dbCfg.fsync); + } + if ((0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, "ms", strlen("ms"))) +#if NANO_SECOND_ENABLED == 1 + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, + "ns", strlen("ns"))) +#endif + || (0 == strncasecmp(g_Dbs.db[i].dbCfg.precision, + "us", strlen("us")))) { + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, + " precision \'%s\';", g_Dbs.db[i].dbCfg.precision); + } + + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { + taos_close(taos); + errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); + return -1; + } + printf("\ncreate database %s success!\n\n", g_Dbs.db[i].dbName); } - len += snprintf(pThreadInfo->buffer + len, - buff_len - len, - "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", - pThreadInfo->db_name, superTblInfo->childTblPrefix, - i, pThreadInfo->db_name, - superTblInfo->sTblName, tagsValBuf); - free(tagsValBuf); - batchNum++; - if ((batchNum < superTblInfo->batchCreateTableNum) - && ((buff_len - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { - continue; + + debugPrint("%s() LN%d supertbl count:%"PRIu64"\n", + __func__, __LINE__, g_Dbs.db[i].superTblCount); + + int validStbCount = 0; + + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, + g_Dbs.db[i].superTbls[j].sTblName); + ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); + + if ((ret != 0) || (g_Dbs.db[i].drop)) { + ret = createSuperTable(taos, g_Dbs.db[i].dbName, + &g_Dbs.db[i].superTbls[j]); + + if (0 != ret) { + errorPrint("create super table %"PRIu64" failed!\n\n", j); + 
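/*
 * Illustrative sketch of the pattern createDatabasesAndStables() uses above to
 * build the CREATE DATABASE statement: each optional clause is appended only
 * when the corresponding dbCfg value is set. The parameter set shown here is a
 * reduced, hypothetical subset of the real options.
 */
#include <stdio.h>

static int sketchBuildCreateDb(char *cmd, int size, const char *dbName,
                               int blocks, int cache, int keep,
                               const char *precision) {
    int len = snprintf(cmd, size, "create database if not exists %s", dbName);
    if (blocks > 0)
        len += snprintf(cmd + len, size - len, " blocks %d", blocks);
    if (cache > 0)
        len += snprintf(cmd + len, size - len, " cache %d", cache);
    if (keep > 0)
        len += snprintf(cmd + len, size - len, " keep %d", keep);
    if (precision && precision[0])
        len += snprintf(cmd + len, size - len, " precision '%s';", precision);
    return len;  /* e.g. "create database if not exists db blocks 8 cache 16 precision 'ms';" */
}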
continue; + } + } + + ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, + &g_Dbs.db[i].superTbls[j]); + if (0 != ret) { + errorPrint("\nget super table %s.%s info failed!\n\n", + g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); + continue; + } + + validStbCount ++; } - } + + g_Dbs.db[i].superTblCount = validStbCount; } - len = 0; - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)){ - errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); - free(pThreadInfo->buffer); - return NULL; + taos_close(taos); + return 0; +} + +static void* createTable(void *sarg) +{ + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + setThreadName("createTable"); + + uint64_t lastPrintTime = taosGetTimestampMs(); + + int buff_len = BUFFER_SIZE; + + pThreadInfo->buffer = calloc(buff_len, 1); + if (pThreadInfo->buffer == NULL) { + errorPrint("%s() LN%d, Memory allocated failed!\n", __func__, __LINE__); + exit(-1); } - uint64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", - pThreadInfo->threadID, pThreadInfo->start_table_from, i); - lastPrintTime = currentPrintTime; + int len = 0; + int batchNum = 0; + + verbosePrint("%s() LN%d: Creating table from %"PRIu64" to %"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->start_table_from, pThreadInfo->end_table_to); + + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + if (0 == g_Dbs.use_metric) { + snprintf(pThreadInfo->buffer, buff_len, + "create table if not exists %s.%s%"PRIu64" %s;", + pThreadInfo->db_name, + g_args.tb_prefix, i, + pThreadInfo->cols); + } else { + if (superTblInfo == NULL) { + errorPrint("%s() LN%d, use metric, but super table info is NULL\n", + __func__, __LINE__); + free(pThreadInfo->buffer); + exit(-1); + } else { + if (0 == len) { + batchNum = 0; + memset(pThreadInfo->buffer, 0, buff_len); + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, "create table "); + } + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(superTblInfo, i); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + i % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + free(pThreadInfo->buffer); + return NULL; + } + len += snprintf(pThreadInfo->buffer + len, + buff_len - len, + "if not exists %s.%s%"PRIu64" using %s.%s tags %s ", + pThreadInfo->db_name, superTblInfo->childTblPrefix, + i, pThreadInfo->db_name, + superTblInfo->sTblName, tagsValBuf); + free(tagsValBuf); + batchNum++; + if ((batchNum < superTblInfo->batchCreateTableNum) + && ((buff_len - len) + >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } + } + } + + len = 0; + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)){ + errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + free(pThreadInfo->buffer); + return NULL; + } + + uint64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] already create %"PRIu64" - %"PRIu64" tables\n", + pThreadInfo->threadID, pThreadInfo->start_table_from, i); + lastPrintTime = currentPrintTime; + } } - } - if (0 != len) { - if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, - NO_INSERT_TYPE, false)) { - errorPrint( "queryDbExec() failed. 
buffer:\n%s\n", pThreadInfo->buffer); + if (0 != len) { + if (0 != queryDbExec(pThreadInfo->taos, pThreadInfo->buffer, + NO_INSERT_TYPE, false)) { + errorPrint( "queryDbExec() failed. buffer:\n%s\n", pThreadInfo->buffer); + } } - } - free(pThreadInfo->buffer); - return NULL; + free(pThreadInfo->buffer); + return NULL; } static int startMultiThreadCreateChildTable( char* cols, int threads, uint64_t tableFrom, int64_t ntables, char* db_name, SSuperTable* superTblInfo) { - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - - if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed\n"); - exit(-1); - } - - if (threads < 1) { - threads = 1; - } - - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int64_t b = 0; - b = ntables % threads; - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE); - pThreadInfo->superTblInfo = superTblInfo; - verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); - pThreadInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - db_name, - g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", - __func__, __LINE__, taos_errstr(NULL)); - free(pids); - free(infos); - return -1; - } - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->use_metric = true; - pThreadInfo->cols = cols; - pThreadInfo->minDelay = UINT64_MAX; - pthread_create(pids + i, NULL, createTable, pThreadInfo); - } - - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - } + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - free(pids); - free(infos); + if ((NULL == pids) || (NULL == infos)) { + printf("malloc failed\n"); + exit(-1); + } + + if (threads < 1) { + threads = 1; + } + + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int64_t b = 0; + b = ntables % threads; + + for (int64_t i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); + pThreadInfo->superTblInfo = superTblInfo; + verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); + pThreadInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + db_name, + g_Dbs.port); + if (pThreadInfo->taos == NULL) { + errorPrint( "%s() LN%d, Failed to connect to TDengine, reason:%s\n", + __func__, __LINE__, taos_errstr(NULL)); + free(pids); + free(infos); + return -1; + } + + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = iend_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + pThreadInfo->use_metric = true; + pThreadInfo->cols = cols; + pThreadInfo->minDelay = UINT64_MAX; + pthread_create(pids + i, NULL, createTable, pThreadInfo); + } - return 0; + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } + + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + taos_close(pThreadInfo->taos); + } + + free(pids); + free(infos); + + return 0; } static void createChildTables() { - char tblColsBuf[MAX_SQL_SIZE]; + char tblColsBuf[TSDB_MAX_BYTES_PER_ROW]; int len; for (int i = 0; i < g_Dbs.dbCount; i++) { @@ -3086,12 +3232,14 @@ static void createChildTables() { if (g_Dbs.db[i].superTblCount > 0) { // with super table for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if ((AUTO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) - || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { + if ((AUTO_CREATE_SUBTBL + == g_Dbs.db[i].superTbls[j].autoCreateTable) + || (TBL_ALREADY_EXISTS + == g_Dbs.db[i].superTbls[j].childTblExists)) { continue; } verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); uint64_t startFrom = 0; g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; @@ -3108,21 +3256,21 @@ static void createChildTables() { } } else { // normal table - len = snprintf(tblColsBuf, MAX_SQL_SIZE, "(TS TIMESTAMP"); + len = snprintf(tblColsBuf, TSDB_MAX_BYTES_PER_ROW, "(TS TIMESTAMP"); for (int j = 0; j < g_args.num_of_CPR; j++) { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", strlen("NCHAR")) == 0)) { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s(%d)", j, g_args.datatype[j], g_args.len_of_binary); } else { - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, - ", COL%d %s", j, g_args.datatype[j]); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, + ",C%d %s", j, g_args.datatype[j]); } len = strlen(tblColsBuf); } - snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ")"); + snprintf(tblColsBuf + len, TSDB_MAX_BYTES_PER_ROW - len, ")"); verbosePrint("%s() LN%d: dbName: %s num of tb: %"PRId64" schema: %s\n", __func__, __LINE__, @@ -3139,1781 +3287,1825 @@ static void createChildTables() { } /* - Read 10000 lines at most. If more than 10000 lines, continue to read after using -*/ + Read 10000 lines at most. 
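/*
 * Illustrative sketch of how startMultiThreadCreateChildTable() splits ntables
 * across the worker threads: each thread handles a = ntables / threads tables,
 * the first b = ntables % threads threads take one extra, and the resulting
 * [start_table_from, end_table_to] ranges tile the whole table set.
 */
#include <inttypes.h>
#include <stdio.h>

static void sketchPartitionTables(int64_t ntables, int threads) {
    int64_t a = ntables / threads;
    if (a < 1) {
        threads = (int)ntables;
        a = 1;
    }
    int64_t b = ntables % threads;

    uint64_t tableFrom = 0;
    for (int i = 0; i < threads; i++) {
        uint64_t end = (i < b) ? (tableFrom + a) : (tableFrom + a - 1);
        printf("thread %d: tables %" PRIu64 " .. %" PRIu64 "\n", i, tableFrom, end);
        tableFrom = end + 1;
    }
}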
If more than 10000 lines, continue to read after using + */ static int readTagFromCsvFileToMem(SSuperTable * superTblInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - - FILE *fp = fopen(superTblInfo->tagsFile, "r"); - if (fp == NULL) { - printf("Failed to open tags file: %s, reason:%s\n", - superTblInfo->tagsFile, strerror(errno)); - return -1; - } - - if (superTblInfo->tagDataBuf) { - free(superTblInfo->tagDataBuf); - superTblInfo->tagDataBuf = NULL; - } - - int tagCount = 10000; - int count = 0; - char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); - if (tagDataBuf == NULL) { - printf("Failed to calloc, reason:%s\n", strerror(errno)); - fclose(fp); - return -1; - } - - while((readLen = tgetline(&line, &n, fp)) != -1) { - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + + FILE *fp = fopen(superTblInfo->tagsFile, "r"); + if (fp == NULL) { + printf("Failed to open tags file: %s, reason:%s\n", + superTblInfo->tagsFile, strerror(errno)); + return -1; } - if (readLen == 0) { - continue; + if (superTblInfo->tagDataBuf) { + free(superTblInfo->tagDataBuf); + superTblInfo->tagDataBuf = NULL; } - memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); - count++; - - if (count >= tagCount - 1) { - char *tmp = realloc(tagDataBuf, - (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); - if (tmp != NULL) { - tagDataBuf = tmp; - tagCount = (int)(tagCount*1.5); - memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, - 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); - } else { - // exit, if allocate more memory failed - printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); - tmfree(tagDataBuf); - free(line); + int tagCount = 10000; + int count = 0; + char* tagDataBuf = calloc(1, superTblInfo->lenOfTagOfOneRow * tagCount); + if (tagDataBuf == NULL) { + printf("Failed to calloc, reason:%s\n", strerror(errno)); fclose(fp); return -1; - } } - } - superTblInfo->tagDataBuf = tagDataBuf; - superTblInfo->tagSampleCount = count; + while((readLen = tgetline(&line, &n, fp)) != -1) { + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } - free(line); - fclose(fp); - return 0; -} + if (readLen == 0) { + continue; + } -#if 0 -int readSampleFromJsonFileToMem(SSuperTable * superTblInfo) { - // TODO - return 0; + memcpy(tagDataBuf + count * superTblInfo->lenOfTagOfOneRow, line, readLen); + count++; + + if (count >= tagCount - 1) { + char *tmp = realloc(tagDataBuf, + (size_t)tagCount*1.5*superTblInfo->lenOfTagOfOneRow); + if (tmp != NULL) { + tagDataBuf = tmp; + tagCount = (int)(tagCount*1.5); + memset(tagDataBuf + count*superTblInfo->lenOfTagOfOneRow, + 0, (size_t)((tagCount-count)*superTblInfo->lenOfTagOfOneRow)); + } else { + // exit, if allocate more memory failed + printf("realloc fail for save tag val from %s\n", superTblInfo->tagsFile); + tmfree(tagDataBuf); + free(line); + fclose(fp); + return -1; + } + } + } + + superTblInfo->tagDataBuf = tagDataBuf; + superTblInfo->tagSampleCount = count; + + free(line); + fclose(fp); + return 0; } -#endif /* - Read 10000 lines at most. If more than 10000 lines, continue to read after using -*/ + Read 10000 lines at most. 
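/*
 * Illustrative sketch of the growth policy readTagFromCsvFileToMem() applies
 * above: the tag buffer starts with room for 10000 rows of lenOfTagOfOneRow
 * bytes and is enlarged by a factor of 1.5 whenever the file holds more rows.
 * The helper name and its signature are hypothetical.
 */
#include <stdlib.h>
#include <string.h>

static char* sketchGrowTagBuf(char *buf, int *rowCap, size_t rowLen, int rowsUsed) {
    if (rowsUsed < *rowCap - 1) {
        return buf;                       /* still enough room */
    }
    int newCap = (int)(*rowCap * 1.5);
    char *tmp = realloc(buf, (size_t)newCap * rowLen);
    if (tmp == NULL) {
        return NULL;                      /* caller keeps the old buffer and bails out */
    }
    memset(tmp + (size_t)rowsUsed * rowLen, 0,
           (size_t)(newCap - rowsUsed) * rowLen);
    *rowCap = newCap;
    return tmp;
}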
If more than 10000 lines, continue to read after using + */ static int readSampleFromCsvFileToMem( SSuperTable* superTblInfo) { - size_t n = 0; - ssize_t readLen = 0; - char * line = NULL; - int getRows = 0; - - FILE* fp = fopen(superTblInfo->sampleFile, "r"); - if (fp == NULL) { - errorPrint( "Failed to open sample file: %s, reason:%s\n", - superTblInfo->sampleFile, strerror(errno)); - return -1; - } - - assert(superTblInfo->sampleDataBuf); - memset(superTblInfo->sampleDataBuf, 0, - MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); - while(1) { - readLen = tgetline(&line, &n, fp); - if (-1 == readLen) { - if(0 != fseek(fp, 0, SEEK_SET)) { - errorPrint( "Failed to fseek file: %s, reason:%s\n", + size_t n = 0; + ssize_t readLen = 0; + char * line = NULL; + int getRows = 0; + + FILE* fp = fopen(superTblInfo->sampleFile, "r"); + if (fp == NULL) { + errorPrint( "Failed to open sample file: %s, reason:%s\n", superTblInfo->sampleFile, strerror(errno)); - fclose(fp); return -1; - } - continue; } - if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { - line[--readLen] = 0; - } + assert(superTblInfo->sampleDataBuf); + memset(superTblInfo->sampleDataBuf, 0, + MAX_SAMPLES_ONCE_FROM_FILE * superTblInfo->lenOfOneRow); + while(1) { + readLen = tgetline(&line, &n, fp); + if (-1 == readLen) { + if(0 != fseek(fp, 0, SEEK_SET)) { + errorPrint( "Failed to fseek file: %s, reason:%s\n", + superTblInfo->sampleFile, strerror(errno)); + fclose(fp); + return -1; + } + continue; + } - if (readLen == 0) { - continue; - } + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { + line[--readLen] = 0; + } - if (readLen > superTblInfo->lenOfOneRow) { - printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", - (int32_t)readLen, superTblInfo->lenOfOneRow); - continue; - } + if (readLen == 0) { + continue; + } + + if (readLen > superTblInfo->lenOfOneRow) { + printf("sample row len[%d] overflow define schema len[%"PRIu64"], so discard this row\n", + (int32_t)readLen, superTblInfo->lenOfOneRow); + continue; + } - memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, - line, readLen); - getRows++; + memcpy(superTblInfo->sampleDataBuf + getRows * superTblInfo->lenOfOneRow, + line, readLen); + getRows++; - if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { - break; + if (getRows == MAX_SAMPLES_ONCE_FROM_FILE) { + break; + } } - } - fclose(fp); - tmfree(line); - return 0; + fclose(fp); + tmfree(line); + return 0; } static bool getColumnAndTagTypeFromInsertJsonFile( cJSON* stbInfo, SSuperTable* superTbls) { - bool ret = false; - - // columns - cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); - if (columns && columns->type != cJSON_Array) { - printf("ERROR: failed to read json, columns not found\n"); - goto PARSE_OVER; - } else if (NULL == columns) { - superTbls->columnCount = 0; - superTbls->tagCount = 0; - return true; - } + bool ret = false; - int columnSize = cJSON_GetArraySize(columns); - if ((columnSize + 1/* ts */) > MAX_COLUMN_COUNT) { - errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", - __func__, __LINE__, MAX_COLUMN_COUNT); - goto PARSE_OVER; - } + // columns + cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); + if (columns && columns->type != cJSON_Array) { + printf("ERROR: failed to read json, columns not found\n"); + goto PARSE_OVER; + } else if (NULL == columns) { + superTbls->columnCount = 0; + superTbls->tagCount = 0; + return true; + } - int count = 1; - int index = 0; - 
StrColumn columnCase; + int columnSize = cJSON_GetArraySize(columns); + if ((columnSize + 1/* ts */) > TSDB_MAX_COLUMNS) { + errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", + __func__, __LINE__, TSDB_MAX_COLUMNS); + goto PARSE_OVER; + } - //superTbls->columnCount = columnSize; - for (int k = 0; k < columnSize; ++k) { - cJSON* column = cJSON_GetArrayItem(columns, k); - if (column == NULL) continue; + int count = 1; + int index = 0; + StrColumn columnCase; - count = 1; - cJSON* countObj = cJSON_GetObjectItem(column, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column count not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(column, "type"); - if (!dataType || dataType->type != cJSON_String - || dataType->valuestring == NULL) { - errorPrint("%s() LN%d: failed to read json, column type not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - - cJSON* dataLen = cJSON_GetObjectItem(column, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - debugPrint("%s() LN%d: failed to read json, column len not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - columnCase.dataLen = 8; - } - - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->columns[index].dataType, - columnCase.dataType, MAX_TB_NAME_SIZE); - superTbls->columns[index].dataLen = columnCase.dataLen; - index++; - } - } - - if ((index + 1 /* ts */) > MAX_COLUMN_COUNT) { - errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n", - __func__, __LINE__, MAX_COLUMN_COUNT); - goto PARSE_OVER; - } - - superTbls->columnCount = index; - - count = 1; - index = 0; - // tags - cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); - if (!tags || tags->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, tags not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - int tagSize = cJSON_GetArraySize(tags); - if (tagSize > MAX_TAG_COUNT) { - errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", - __func__, __LINE__, MAX_TAG_COUNT); - goto PARSE_OVER; - } - - //superTbls->tagCount = tagSize; - for (int k = 0; k < tagSize; ++k) { - cJSON* tag = cJSON_GetArrayItem(tags, k); - if (tag == NULL) continue; + //superTbls->columnCount = columnSize; + for (int k = 0; k < columnSize; ++k) { + cJSON* column = cJSON_GetArrayItem(columns, k); + if (column == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(column, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + errorPrint("%s() LN%d, failed to read json, column count not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + count = 1; + } + + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(column, "type"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d: failed to read 
json, column type not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, DATATYPE_BUFF_LEN); + tstrncpy(columnCase.dataType, dataType->valuestring, + min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); + + cJSON* dataLen = cJSON_GetObjectItem(column, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + debugPrint("%s() LN%d: failed to read json, column len not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + columnCase.dataLen = SMALL_BUFF_LEN; + } + + for (int n = 0; n < count; ++n) { + tstrncpy(superTbls->columns[index].dataType, + columnCase.dataType, + min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); + superTbls->columns[index].dataLen = columnCase.dataLen; + index++; + } + } + + if ((index + 1 /* ts */) > MAX_NUM_COLUMNS) { + errorPrint("%s() LN%d, failed to read json, column size overflow, allowed max column size is %d\n", + __func__, __LINE__, MAX_NUM_COLUMNS); + goto PARSE_OVER; + } + + superTbls->columnCount = index; count = 1; - cJSON* countObj = cJSON_GetObjectItem(tag, "count"); - if (countObj && countObj->type == cJSON_Number) { - count = countObj->valueint; - } else if (countObj && countObj->type != cJSON_Number) { - printf("ERROR: failed to read json, column count not found\n"); - goto PARSE_OVER; - } else { - count = 1; - } - - // column info - memset(&columnCase, 0, sizeof(StrColumn)); - cJSON *dataType = cJSON_GetObjectItem(tag, "type"); - if (!dataType || dataType->type != cJSON_String - || dataType->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, tag type not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - - cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); - if (dataLen && dataLen->type == cJSON_Number) { - columnCase.dataLen = dataLen->valueint; - } else if (dataLen && dataLen->type != cJSON_Number) { - errorPrint("%s() LN%d, failed to read json, column len not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - columnCase.dataLen = 0; + index = 0; + // tags + cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); + if (!tags || tags->type != cJSON_Array) { + errorPrint("%s() LN%d, failed to read json, tags not found\n", + __func__, __LINE__); + goto PARSE_OVER; } - for (int n = 0; n < count; ++n) { - tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - MAX_TB_NAME_SIZE); - superTbls->tags[index].dataLen = columnCase.dataLen; - index++; + int tagSize = cJSON_GetArraySize(tags); + if (tagSize > TSDB_MAX_TAGS) { + errorPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", + __func__, __LINE__, TSDB_MAX_TAGS); + goto PARSE_OVER; } - } - if (index > MAX_TAG_COUNT) { - errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n", - __func__, __LINE__, MAX_TAG_COUNT); - goto PARSE_OVER; - } + //superTbls->tagCount = tagSize; + for (int k = 0; k < tagSize; ++k) { + cJSON* tag = cJSON_GetArrayItem(tags, k); + if (tag == NULL) continue; + + count = 1; + cJSON* countObj = cJSON_GetObjectItem(tag, "count"); + if (countObj && countObj->type == cJSON_Number) { + count = countObj->valueint; + } else if (countObj && countObj->type != cJSON_Number) { + printf("ERROR: failed to read json, column count not found\n"); + goto PARSE_OVER; + } else { + count = 1; + } - superTbls->tagCount = 
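/*
 * Illustrative sketch of the column-spec handling in
 * getColumnAndTagTypeFromInsertJsonFile(): each entry of the "columns" array
 * carries a "type", an optional "len" and an optional "count", and is expanded
 * into `count` identical column slots. Error handling is trimmed; the function
 * name, array sizes and the default length used here are hypothetical.
 */
#include <string.h>
#include "cJSON.h"

static int sketchExpandColumns(cJSON *columns, char types[][32], int *lens, int maxCols) {
    int index = 0;
    int size = cJSON_GetArraySize(columns);
    for (int k = 0; k < size && index < maxCols; k++) {
        cJSON *column = cJSON_GetArrayItem(columns, k);
        if (column == NULL) continue;

        cJSON *type  = cJSON_GetObjectItem(column, "type");
        cJSON *len   = cJSON_GetObjectItem(column, "len");
        cJSON *count = cJSON_GetObjectItem(column, "count");
        if (!type || type->type != cJSON_String || type->valuestring == NULL) {
            return -1;                     /* column type is mandatory */
        }

        int n = (count && count->type == cJSON_Number) ? (int)count->valueint : 1;
        int l = (len && len->type == cJSON_Number) ? (int)len->valueint : 8;

        for (int c = 0; c < n && index < maxCols; c++) {
            strncpy(types[index], type->valuestring, 31);
            types[index][31] = '\0';
            lens[index] = l;
            index++;
        }
    }
    return index;   /* number of expanded columns, excluding the ts column */
}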
index; + // column info + memset(&columnCase, 0, sizeof(StrColumn)); + cJSON *dataType = cJSON_GetObjectItem(tag, "type"); + if (!dataType || dataType->type != cJSON_String + || dataType->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, tag type not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + tstrncpy(columnCase.dataType, dataType->valuestring, + min(DATATYPE_BUFF_LEN, strlen(dataType->valuestring) + 1)); + + cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); + if (dataLen && dataLen->type == cJSON_Number) { + columnCase.dataLen = dataLen->valueint; + } else if (dataLen && dataLen->type != cJSON_Number) { + errorPrint("%s() LN%d, failed to read json, column len not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + columnCase.dataLen = 0; + } - if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > MAX_COLUMN_COUNT) { - errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n", - __func__, __LINE__, MAX_COLUMN_COUNT); - goto PARSE_OVER; - } - ret = true; + for (int n = 0; n < count; ++n) { + tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, + min(DATATYPE_BUFF_LEN, strlen(columnCase.dataType) + 1)); + superTbls->tags[index].dataLen = columnCase.dataLen; + index++; + } + } + + if (index > TSDB_MAX_TAGS) { + errorPrint("%s() LN%d, failed to read json, tags size overflow, allowed max tag count is %d\n", + __func__, __LINE__, TSDB_MAX_TAGS); + goto PARSE_OVER; + } + + superTbls->tagCount = index; + + if ((superTbls->columnCount + superTbls->tagCount + 1 /* ts */) > TSDB_MAX_COLUMNS) { + errorPrint("%s() LN%d, columns + tags is more than allowed max columns count: %d\n", + __func__, __LINE__, TSDB_MAX_COLUMNS); + goto PARSE_OVER; + } + ret = true; PARSE_OVER: - return ret; + return ret; } static bool getMetaFromInsertJsonFile(cJSON* root) { - bool ret = false; - - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } + bool ret = false; - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - printf("ERROR: failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_Dbs.port = port->valueint; - } else if (!port) { - g_Dbs.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE); - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_Dbs.password, password->valuestring, MAX_PASSWORD_SIZE); - } else if (!password) { - tstrncpy(g_Dbs.password, "taosdata", MAX_PASSWORD_SIZE); - } - - cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); - if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { - tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); - } else if (!resultfile) { - tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); - } - - cJSON* 
threads = cJSON_GetObjectItem(root, "thread_count"); - if (threads && threads->type == cJSON_Number) { - g_Dbs.threadCount = threads->valueint; - } else if (!threads) { - g_Dbs.threadCount = 1; - } else { - printf("ERROR: failed to read json, threads not found\n"); - goto PARSE_OVER; - } - - cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); - if (threads2 && threads2->type == cJSON_Number) { - g_Dbs.threadCountByCreateTbl = threads2->valueint; - } else if (!threads2) { - g_Dbs.threadCountByCreateTbl = 1; - } else { - errorPrint("%s() LN%d, failed to read json, threads2 not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); - if (gInsertInterval && gInsertInterval->type == cJSON_Number) { - if (gInsertInterval->valueint <0) { - errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.insert_interval = gInsertInterval->valueint; - } else if (!gInsertInterval) { - g_args.insert_interval = 0; - } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); - if (interlaceRows && interlaceRows->type == cJSON_Number) { - if (interlaceRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - - } - g_args.interlace_rows = interlaceRows->valueint; - } else if (!interlaceRows) { - g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req - } else { - errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); - if (maxSqlLen && maxSqlLen->type == cJSON_Number) { - if (maxSqlLen->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.max_sql_len = maxSqlLen->valueint; - } else if (!maxSqlLen) { - g_args.max_sql_len = (1024*1024); - } else { - errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); - if (numRecPerReq && numRecPerReq->type == cJSON_Number) { - if (numRecPerReq->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { - printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n", - numRecPerReq->valueint, MAX_RECORDS_PER_REQ); - printf(" number of records per request value will be set to %d\n\n", - MAX_RECORDS_PER_REQ); - prompt(); - numRecPerReq->valueint = MAX_RECORDS_PER_REQ; - } - g_args.num_of_RPR = numRecPerReq->valueint; - } else if (!numRecPerReq) { - g_args.num_of_RPR = MAX_RECORDS_PER_REQ; - } else { - errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt - && answerPrompt->type == cJSON_String - && answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - 
g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; - } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = true; // default is no, mean answer_yes. - } else { - errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n"); - goto PARSE_OVER; - } - - // rows per table need be less than insert batch - if (g_args.interlace_rows > g_args.num_of_RPR) { - printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", - g_args.interlace_rows, g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", - g_args.num_of_RPR); - prompt(); - g_args.interlace_rows = g_args.num_of_RPR; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (!dbs || dbs->type != cJSON_Array) { - printf("ERROR: failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - int dbSize = cJSON_GetArraySize(dbs); - if (dbSize > MAX_DB_COUNT) { - errorPrint( - "ERROR: failed to read json, databases size overflow, max database is %d\n", - MAX_DB_COUNT); - goto PARSE_OVER; - } - - g_Dbs.dbCount = dbSize; - for (int i = 0; i < dbSize; ++i) { - cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); - if (dbinfos == NULL) continue; - - // dbinfo - cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); - if (!dbinfo || dbinfo->type != cJSON_Object) { - printf("ERROR: failed to read json, dbinfo not found\n"); - goto PARSE_OVER; - } - - cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); - if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { - printf("ERROR: failed to read json, db name not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE); - - cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); - if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { - if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { - g_Dbs.db[i].drop = true; - } else { - g_Dbs.db[i].drop = false; - } - } else if (!drop) { - g_Dbs.db[i].drop = g_args.drop_database; - } else { - errorPrint("%s() LN%d, failed to read json, drop input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); - if (precision && precision->type == cJSON_String - && precision->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - MAX_DB_NAME_SIZE); - } else if (!precision) { - //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); - } else { - printf("ERROR: failed to read json, precision not found\n"); - goto PARSE_OVER; + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + tstrncpy(g_Dbs.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); } - cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); - if (update && update->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.update = update->valueint; - } else if (!update) { - g_Dbs.db[i].dbCfg.update = -1; + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); + } else if (!host) { + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - printf("ERROR: failed to read json, update not found\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, host not 
found\n"); + goto PARSE_OVER; } - cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); - if (replica && replica->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.replica = replica->valueint; - } else if (!replica) { - g_Dbs.db[i].dbCfg.replica = -1; - } else { - printf("ERROR: failed to read json, replica not found\n"); - goto PARSE_OVER; + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_Dbs.port = port->valueint; + } else if (!port) { + g_Dbs.port = 6030; } - cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); - if (keep && keep->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.keep = keep->valueint; - } else if (!keep) { - g_Dbs.db[i].dbCfg.keep = -1; - } else { - printf("ERROR: failed to read json, keep not found\n"); - goto PARSE_OVER; + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); + } else if (!user) { + tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE); } - cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); - if (days && days->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.days = days->valueint; - } else if (!days) { - g_Dbs.db[i].dbCfg.days = -1; - } else { - printf("ERROR: failed to read json, days not found\n"); - goto PARSE_OVER; + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + tstrncpy(g_Dbs.password, password->valuestring, MAX_PASSWORD_SIZE); + } else if (!password) { + tstrncpy(g_Dbs.password, "taosdata", MAX_PASSWORD_SIZE); } - cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); - if (cache && cache->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cache = cache->valueint; - } else if (!cache) { - g_Dbs.db[i].dbCfg.cache = -1; - } else { - printf("ERROR: failed to read json, cache not found\n"); - goto PARSE_OVER; + cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); + if (resultfile && resultfile->type == cJSON_String && resultfile->valuestring != NULL) { + tstrncpy(g_Dbs.resultFile, resultfile->valuestring, MAX_FILE_NAME_LEN); + } else if (!resultfile) { + tstrncpy(g_Dbs.resultFile, "./insert_res.txt", MAX_FILE_NAME_LEN); } - cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); - if (blocks && blocks->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.blocks = blocks->valueint; - } else if (!blocks) { - g_Dbs.db[i].dbCfg.blocks = -1; + cJSON* threads = cJSON_GetObjectItem(root, "thread_count"); + if (threads && threads->type == cJSON_Number) { + g_Dbs.threadCount = threads->valueint; + } else if (!threads) { + g_Dbs.threadCount = 1; } else { - printf("ERROR: failed to read json, block not found\n"); - goto PARSE_OVER; - } - - //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); - //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; - //} else if (!maxtablesPerVnode) { - // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; - //} else { - // printf("failed to read json, maxtablesPerVnode not found"); - // goto PARSE_OVER; - //} + printf("ERROR: failed to read json, threads not found\n"); + goto PARSE_OVER; + } - cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows"); - if (minRows && minRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.minRows = minRows->valueint; - } else if (!minRows) { - g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default + cJSON* threads2 = cJSON_GetObjectItem(root, 
"thread_count_create_tbl"); + if (threads2 && threads2->type == cJSON_Number) { + g_Dbs.threadCountByCreateTbl = threads2->valueint; + } else if (!threads2) { + g_Dbs.threadCountByCreateTbl = 1; } else { - printf("ERROR: failed to read json, minRows not found\n"); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, threads2 not found\n", + __func__, __LINE__); + goto PARSE_OVER; } - cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); - if (maxRows && maxRows->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; - } else if (!maxRows) { - g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default + cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); + if (gInsertInterval && gInsertInterval->type == cJSON_Number) { + if (gInsertInterval->valueint <0) { + errorPrint("%s() LN%d, failed to read json, insert interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_args.insert_interval = gInsertInterval->valueint; + } else if (!gInsertInterval) { + g_args.insert_interval = 0; } else { - printf("ERROR: failed to read json, maxRows not found\n"); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; } - cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); - if (comp && comp->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.comp = comp->valueint; - } else if (!comp) { - g_Dbs.db[i].dbCfg.comp = -1; + cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); + if (interlaceRows && interlaceRows->type == cJSON_Number) { + if (interlaceRows->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + + } + g_args.interlace_rows = interlaceRows->valueint; + } else if (!interlaceRows) { + g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - printf("ERROR: failed to read json, comp not found\n"); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; } - cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel"); - if (walLevel && walLevel->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint; - } else if (!walLevel) { - g_Dbs.db[i].dbCfg.walLevel = -1; + cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); + if (maxSqlLen && maxSqlLen->type == cJSON_Number) { + if (maxSqlLen->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_args.max_sql_len = maxSqlLen->valueint; + } else if (!maxSqlLen) { + g_args.max_sql_len = (1024*1024); } else { - printf("ERROR: failed to read json, walLevel not found\n"); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; } - cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast"); - if (cacheLast && cacheLast->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.cacheLast = cacheLast->valueint; - } else if (!cacheLast) { - g_Dbs.db[i].dbCfg.cacheLast = -1; + cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); + if (numRecPerReq && numRecPerReq->type == cJSON_Number) { + if (numRecPerReq->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } else if (numRecPerReq->valueint > MAX_RECORDS_PER_REQ) { + printf("NOTICE: number of records per request value %"PRIu64" > %d\n\n", + numRecPerReq->valueint, MAX_RECORDS_PER_REQ); + printf(" number of records per request value will be set to %d\n\n", + MAX_RECORDS_PER_REQ); + prompt(); + numRecPerReq->valueint = MAX_RECORDS_PER_REQ; + } + g_args.num_of_RPR = numRecPerReq->valueint; + } else if (!numRecPerReq) { + g_args.num_of_RPR = MAX_RECORDS_PER_REQ; } else { - printf("ERROR: failed to read json, cacheLast not found\n"); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", + __func__, __LINE__); + goto PARSE_OVER; } - cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum"); - if (quorum && quorum->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.quorum = quorum->valueint; - } else if (!quorum) { - g_Dbs.db[i].dbCfg.quorum = 1; + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt + && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = true; // default is no, mean answer_yes. 
} else { - printf("failed to read json, quorum input mistake"); - goto PARSE_OVER; + errorPrint("%s", "failed to read json, confirm_parameter_prompt input mistake\n"); + goto PARSE_OVER; } - cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync"); - if (fsync && fsync->type == cJSON_Number) { - g_Dbs.db[i].dbCfg.fsync = fsync->valueint; - } else if (!fsync) { - g_Dbs.db[i].dbCfg.fsync = -1; - } else { - errorPrint("%s() LN%d, failed to read json, fsync input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; + // rows per table need be less than insert batch + if (g_args.interlace_rows > g_args.num_of_RPR) { + printf("NOTICE: interlace rows value %u > num_of_records_per_req %u\n\n", + g_args.interlace_rows, g_args.num_of_RPR); + printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", + g_args.num_of_RPR); + prompt(); + g_args.interlace_rows = g_args.num_of_RPR; } - // super_talbes - cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); - if (!stables || stables->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super_tables not found\n", - __func__, __LINE__); - goto PARSE_OVER; + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (!dbs || dbs->type != cJSON_Array) { + printf("ERROR: failed to read json, databases not found\n"); + goto PARSE_OVER; } - int stbSize = cJSON_GetArraySize(stables); - if (stbSize > MAX_SUPER_TABLE_COUNT) { - errorPrint( - "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n", - __func__, __LINE__, MAX_SUPER_TABLE_COUNT); - goto PARSE_OVER; + int dbSize = cJSON_GetArraySize(dbs); + if (dbSize > MAX_DB_COUNT) { + errorPrint( + "ERROR: failed to read json, databases size overflow, max database is %d\n", + MAX_DB_COUNT); + goto PARSE_OVER; } - g_Dbs.db[i].superTblCount = stbSize; - for (int j = 0; j < stbSize; ++j) { - cJSON* stbInfo = cJSON_GetArrayItem(stables, j); - if (stbInfo == NULL) continue; + g_Dbs.dbCount = dbSize; + for (int i = 0; i < dbSize; ++i) { + cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); + if (dbinfos == NULL) continue; - // dbinfo - cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); - if (!stbName || stbName->type != cJSON_String - || stbName->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, stb name not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, - MAX_TB_NAME_SIZE); + // dbinfo + cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); + if (!dbinfo || dbinfo->type != cJSON_Object) { + printf("ERROR: failed to read json, dbinfo not found\n"); + goto PARSE_OVER; + } - cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); - if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { - printf("ERROR: failed to read json, childtable_prefix not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, - MAX_DB_NAME_SIZE); - - cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); - if (autoCreateTbl - && autoCreateTbl->type == cJSON_String - && autoCreateTbl->valuestring != NULL) { - if ((0 == strncasecmp(autoCreateTbl->valuestring, "yes", 3)) - && (TBL_ALREADY_EXISTS != g_Dbs.db[i].superTbls[j].childTblExists)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; - } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - g_Dbs.db[i].superTbls[j].autoCreateTable = 
PRE_CREATE_SUBTBL; - } - } else if (!autoCreateTbl) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } else { - printf("ERROR: failed to read json, auto_create_table not found\n"); - goto PARSE_OVER; - } - - cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); - if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; - } else if (!batchCreateTbl) { - g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000; - } else { - printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); - goto PARSE_OVER; - } - - cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no - if (childTblExists - && childTblExists->type == cJSON_String - && childTblExists->valuestring != NULL) { - if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) - && (g_Dbs.db[i].drop == false)) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; - } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) - || (g_Dbs.db[i].drop == true))) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); + if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { + printf("ERROR: failed to read json, db name not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); + + cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); + if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { + if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { + g_Dbs.db[i].drop = true; + } else { + g_Dbs.db[i].drop = false; + } + } else if (!drop) { + g_Dbs.db[i].drop = g_args.drop_database; } else { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + errorPrint("%s() LN%d, failed to read json, drop input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; } - } else if (!childTblExists) { - g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; - } else { - errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { - g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; - } + cJSON *precision = cJSON_GetObjectItem(dbinfo, "precision"); + if (precision && precision->type == cJSON_String + && precision->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, + SMALL_BUFF_LEN); + } else if (!precision) { + memset(g_Dbs.db[i].dbCfg.precision, 0, SMALL_BUFF_LEN); + } else { + printf("ERROR: failed to read json, precision not found\n"); + goto PARSE_OVER; + } - cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); - if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; - - cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); - if (dataSource && dataSource->type == cJSON_String - && dataSource->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, MAX_DB_NAME_SIZE); - } else if (!dataSource) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); - } else { - errorPrint("%s() LN%d, failed to read json, data_source not found\n", - __func__, __LINE__); - goto 
PARSE_OVER; - } - - cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt - if (stbIface && stbIface->type == cJSON_String - && stbIface->valuestring != NULL) { - if (0 == strcasecmp(stbIface->valuestring, "taosc")) { - g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; - } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { - g_Dbs.db[i].superTbls[j].iface= REST_IFACE; -#if STMT_IFACE_ENABLED == 1 - } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { - g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; -#endif + cJSON* update = cJSON_GetObjectItem(dbinfo, "update"); + if (update && update->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.update = update->valueint; + } else if (!update) { + g_Dbs.db[i].dbCfg.update = -1; } else { - errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n", - __func__, __LINE__, stbIface->valuestring); + printf("ERROR: failed to read json, update not found\n"); goto PARSE_OVER; } - } else if (!stbIface) { - g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; - } else { - errorPrint("%s", "failed to read json, insert_mode not found\n"); - goto PARSE_OVER; - } - cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); - if ((childTbl_limit) && (g_Dbs.db[i].drop != true) - && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { - if (childTbl_limit->type != cJSON_Number) { - printf("ERROR: failed to read json, childtable_limit\n"); + cJSON* replica = cJSON_GetObjectItem(dbinfo, "replica"); + if (replica && replica->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.replica = replica->valueint; + } else if (!replica) { + g_Dbs.db[i].dbCfg.replica = -1; + } else { + printf("ERROR: failed to read json, replica not found\n"); goto PARSE_OVER; } - g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. 
- } - - cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); - if ((childTbl_offset) && (g_Dbs.db[i].drop != true) - && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { - if ((childTbl_offset->type != cJSON_Number) - || (0 > childTbl_offset->valueint)) { - printf("ERROR: failed to read json, childtable_offset\n"); + + cJSON* keep = cJSON_GetObjectItem(dbinfo, "keep"); + if (keep && keep->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.keep = keep->valueint; + } else if (!keep) { + g_Dbs.db[i].dbCfg.keep = -1; + } else { + printf("ERROR: failed to read json, keep not found\n"); goto PARSE_OVER; } - g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; - } else { - g_Dbs.db[i].superTbls[j].childTblOffset = 0; - } - - cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); - if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, MAX_DB_NAME_SIZE); - } else if (!ts) { - tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - "now", MAX_DB_NAME_SIZE); - } else { - printf("ERROR: failed to read json, start_timestamp not found\n"); - goto PARSE_OVER; - } - - cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); - if (timestampStep && timestampStep->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; - } else if (!timestampStep) { - g_Dbs.db[i].superTbls[j].timeStampStep = DEFAULT_TIMESTAMP_STEP; - } else { - printf("ERROR: failed to read json, timestamp_step not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); - if (sampleFormat && sampleFormat->type - == cJSON_String && sampleFormat->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, MAX_DB_NAME_SIZE); - } else if (!sampleFormat) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); - } else { - printf("ERROR: failed to read json, sample_format not found\n"); - goto PARSE_OVER; - } - - cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); - if (sampleFile && sampleFile->type == cJSON_String - && sampleFile->valuestring != NULL) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, - sampleFile->valuestring, MAX_FILE_NAME_LEN); - } else if (!sampleFile) { - memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, MAX_FILE_NAME_LEN); - } else { - printf("ERROR: failed to read json, sample_file not found\n"); - goto PARSE_OVER; - } - - cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); - if ((tagsFile && tagsFile->type == cJSON_String) - && (tagsFile->valuestring != NULL)) { - tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, - tagsFile->valuestring, MAX_FILE_NAME_LEN); - if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { - g_Dbs.db[i].superTbls[j].tagSource = 0; + + cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); + if (days && days->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.days = days->valueint; + } else if (!days) { + g_Dbs.db[i].dbCfg.days = -1; } else { - g_Dbs.db[i].superTbls[j].tagSource = 1; + printf("ERROR: failed to read json, days not found\n"); + goto PARSE_OVER; } - } else if (!tagsFile) { - memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, MAX_FILE_NAME_LEN); - g_Dbs.db[i].superTbls[j].tagSource = 0; - } else { - printf("ERROR: failed to read json, tags_file not found\n"); - goto PARSE_OVER; - } - - cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len"); - if (stbMaxSqlLen && stbMaxSqlLen->type == 
cJSON_Number) { - int32_t len = stbMaxSqlLen->valueint; - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < 5) { - len = 5; - } - g_Dbs.db[i].superTbls[j].maxSqlLen = len; - } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; - } else { - errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } -/* - cJSON *multiThreadWriteOneTbl = - cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes - if (multiThreadWriteOneTbl - && multiThreadWriteOneTbl->type == cJSON_String - && multiThreadWriteOneTbl->valuestring != NULL) { - if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; + + cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); + if (cache && cache->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.cache = cache->valueint; + } else if (!cache) { + g_Dbs.db[i].dbCfg.cache = -1; } else { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; + printf("ERROR: failed to read json, cache not found\n"); + goto PARSE_OVER; } - } else if (!multiThreadWriteOneTbl) { - g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } else { - printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n"); - goto PARSE_OVER; - } -*/ - cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); - if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) { - if (stbInterlaceRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint; - // rows per table need be less than insert batch - if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { - printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %u > num_of_records_per_req %u\n\n", - i, j, g_Dbs.db[i].superTbls[j].interlaceRows, - g_args.num_of_RPR); - printf(" interlace rows value will be set to num_of_records_per_req %u\n\n", - g_args.num_of_RPR); - prompt(); - g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR; - } - } else if (!stbInterlaceRows) { - g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req - } else { - errorPrint( - "%s() LN%d, failed to read json, interlace rows input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); - if (disorderRatio && disorderRatio->type == cJSON_Number) { - if (disorderRatio->valueint > 50) - disorderRatio->valueint = 50; + cJSON* blocks= cJSON_GetObjectItem(dbinfo, "blocks"); + if (blocks && blocks->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.blocks = blocks->valueint; + } else if (!blocks) { + g_Dbs.db[i].dbCfg.blocks = -1; + } else { + printf("ERROR: failed to read json, block not found\n"); + goto PARSE_OVER; + } - if (disorderRatio->valueint < 0) - disorderRatio->valueint = 0; + //cJSON* maxtablesPerVnode= cJSON_GetObjectItem(dbinfo, "maxtablesPerVnode"); + //if (maxtablesPerVnode && maxtablesPerVnode->type == cJSON_Number) { + // g_Dbs.db[i].dbCfg.maxtablesPerVnode = maxtablesPerVnode->valueint; + //} else if (!maxtablesPerVnode) { + // g_Dbs.db[i].dbCfg.maxtablesPerVnode = TSDB_DEFAULT_TABLES; + //} else { + // printf("failed to read json, maxtablesPerVnode not found"); + // goto PARSE_OVER; + //} + + cJSON* minRows= cJSON_GetObjectItem(dbinfo, "minRows"); + if (minRows && minRows->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.minRows = minRows->valueint; + } else if (!minRows) { + g_Dbs.db[i].dbCfg.minRows = 0; // 0 means default + } else { + printf("ERROR: failed to read json, minRows not found\n"); + goto PARSE_OVER; + } - g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; - } else if (!disorderRatio) { - g_Dbs.db[i].superTbls[j].disorderRatio = 0; - } else { - printf("ERROR: failed to read json, disorderRatio not found\n"); - goto PARSE_OVER; - } - - cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); - if (disorderRange && disorderRange->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint; - } else if (!disorderRange) { - g_Dbs.db[i].superTbls[j].disorderRange = 1000; - } else { - printf("ERROR: failed to read json, disorderRange not found\n"); - goto PARSE_OVER; - } + cJSON* maxRows= cJSON_GetObjectItem(dbinfo, "maxRows"); + if (maxRows && maxRows->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.maxRows = maxRows->valueint; + } else if (!maxRows) { + g_Dbs.db[i].dbCfg.maxRows = 0; // 0 means default + } else { + printf("ERROR: failed to read json, maxRows not found\n"); + goto PARSE_OVER; + } - cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows"); - if (insertRows && insertRows->type == cJSON_Number) { - if (insertRows->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; + cJSON* comp= cJSON_GetObjectItem(dbinfo, "comp"); + if (comp && comp->type == cJSON_Number) { + g_Dbs.db[i].dbCfg.comp = comp->valueint; + } else if (!comp) { + g_Dbs.db[i].dbCfg.comp = -1; + } else { + printf("ERROR: failed to read json, comp not found\n"); + goto PARSE_OVER; } - g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint; - } else if (!insertRows) { - g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; - } else { - errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval"); - if (insertInterval && insertInterval->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; - if 
(insertInterval->valueint < 0) {
-          errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
-              __func__, __LINE__);
-          goto PARSE_OVER;
+      cJSON* walLevel= cJSON_GetObjectItem(dbinfo, "walLevel");
+      if (walLevel && walLevel->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.walLevel = walLevel->valueint;
+      } else if (!walLevel) {
+        g_Dbs.db[i].dbCfg.walLevel = -1;
+      } else {
+        printf("ERROR: failed to read json, walLevel not found\n");
+        goto PARSE_OVER;
       }
-      } else if (!insertInterval) {
-        verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n",
-            __func__, __LINE__, g_args.insert_interval);
-        g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval;
-      } else {
-        errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n",
-            __func__, __LINE__);
-        goto PARSE_OVER;
-      }
-      int retVal = getColumnAndTagTypeFromInsertJsonFile(
-          stbInfo, &g_Dbs.db[i].superTbls[j]);
-      if (false == retVal) {
-        goto PARSE_OVER;
-      }
+      cJSON* cacheLast= cJSON_GetObjectItem(dbinfo, "cachelast");
+      if (cacheLast && cacheLast->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.cacheLast = cacheLast->valueint;
+      } else if (!cacheLast) {
+        g_Dbs.db[i].dbCfg.cacheLast = -1;
+      } else {
+        printf("ERROR: failed to read json, cacheLast not found\n");
+        goto PARSE_OVER;
+      }
+
+      cJSON* quorum= cJSON_GetObjectItem(dbinfo, "quorum");
+      if (quorum && quorum->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.quorum = quorum->valueint;
+      } else if (!quorum) {
+        g_Dbs.db[i].dbCfg.quorum = 1;
+      } else {
+        printf("failed to read json, quorum input mistake");
+        goto PARSE_OVER;
+      }
+
+      cJSON* fsync= cJSON_GetObjectItem(dbinfo, "fsync");
+      if (fsync && fsync->type == cJSON_Number) {
+        g_Dbs.db[i].dbCfg.fsync = fsync->valueint;
+      } else if (!fsync) {
+        g_Dbs.db[i].dbCfg.fsync = -1;
+      } else {
+        errorPrint("%s() LN%d, failed to read json, fsync input mistake\n",
+            __func__, __LINE__);
+        goto PARSE_OVER;
+      }
+
+      // super_tables
+      cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables");
+      if (!stables || stables->type != cJSON_Array) {
+        errorPrint("%s() LN%d, failed to read json, super_tables not found\n",
+            __func__, __LINE__);
+        goto PARSE_OVER;
+      }
+
+      int stbSize = cJSON_GetArraySize(stables);
+      if (stbSize > MAX_SUPER_TABLE_COUNT) {
+        errorPrint(
+            "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n",
+            __func__, __LINE__, MAX_SUPER_TABLE_COUNT);
+        goto PARSE_OVER;
+      }
+
+      g_Dbs.db[i].superTblCount = stbSize;
+      for (int j = 0; j < stbSize; ++j) {
+        cJSON* stbInfo = cJSON_GetArrayItem(stables, j);
+        if (stbInfo == NULL) continue;
+
+        // super table info
+        cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name");
+        if (!stbName || stbName->type != cJSON_String
+            || stbName->valuestring == NULL) {
+          errorPrint("%s() LN%d, failed to read json, stb name not found\n",
+              __func__, __LINE__);
+          goto PARSE_OVER;
+        }
+        tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring,
+            TSDB_TABLE_NAME_LEN);
+
+        cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix");
+        if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) {
+          printf("ERROR: failed to read json, childtable_prefix not found\n");
+          goto PARSE_OVER;
+        }
+        tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring,
+            TBNAME_PREFIX_LEN);
+
+        cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table");
+        if (autoCreateTbl
+            && autoCreateTbl->type == cJSON_String
+            && autoCreateTbl->valuestring != NULL) {
+          if ((0 == strncasecmp(autoCreateTbl->valuestring, 
"yes", 3)) + && (TBL_ALREADY_EXISTS != g_Dbs.db[i].superTbls[j].childTblExists)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = AUTO_CREATE_SUBTBL; + } else if (0 == strncasecmp(autoCreateTbl->valuestring, "no", 2)) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + } else if (!autoCreateTbl) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } else { + printf("ERROR: failed to read json, auto_create_table not found\n"); + goto PARSE_OVER; + } + + cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); + if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; + } else if (!batchCreateTbl) { + g_Dbs.db[i].superTbls[j].batchCreateTableNum = 1000; + } else { + printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); + goto PARSE_OVER; + } + + cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no + if (childTblExists + && childTblExists->type == cJSON_String + && childTblExists->valuestring != NULL) { + if ((0 == strncasecmp(childTblExists->valuestring, "yes", 3)) + && (g_Dbs.db[i].drop == false)) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_ALREADY_EXISTS; + } else if ((0 == strncasecmp(childTblExists->valuestring, "no", 2) + || (g_Dbs.db[i].drop == true))) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } + } else if (!childTblExists) { + g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; + } else { + errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { + g_Dbs.db[i].superTbls[j].autoCreateTable = PRE_CREATE_SUBTBL; + } + + cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); + if (!count || count->type != cJSON_Number || 0 >= count->valueint) { + errorPrint("%s() LN%d, failed to read json, childtable_count input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; + + cJSON *dataSource = cJSON_GetObjectItem(stbInfo, "data_source"); + if (dataSource && dataSource->type == cJSON_String + && dataSource->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, + dataSource->valuestring, + min(SMALL_BUFF_LEN, strlen(dataSource->valuestring) + 1)); + } else if (!dataSource) { + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", + min(SMALL_BUFF_LEN, strlen("rand") + 1)); + } else { + errorPrint("%s() LN%d, failed to read json, data_source not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON *stbIface = cJSON_GetObjectItem(stbInfo, "insert_mode"); // taosc , rest, stmt + if (stbIface && stbIface->type == cJSON_String + && stbIface->valuestring != NULL) { + if (0 == strcasecmp(stbIface->valuestring, "taosc")) { + g_Dbs.db[i].superTbls[j].iface= TAOSC_IFACE; + } else if (0 == strcasecmp(stbIface->valuestring, "rest")) { + g_Dbs.db[i].superTbls[j].iface= REST_IFACE; +#if STMT_IFACE_ENABLED == 1 + } else if (0 == strcasecmp(stbIface->valuestring, "stmt")) { + g_Dbs.db[i].superTbls[j].iface= STMT_IFACE; +#endif + } else { + errorPrint("%s() LN%d, failed to read json, insert_mode %s not recognized\n", + __func__, __LINE__, stbIface->valuestring); + goto PARSE_OVER; + } + } else if (!stbIface) { + 
g_Dbs.db[i].superTbls[j].iface = TAOSC_IFACE; + } else { + errorPrint("%s", "failed to read json, insert_mode not found\n"); + goto PARSE_OVER; + } + + cJSON* childTbl_limit = cJSON_GetObjectItem(stbInfo, "childtable_limit"); + if ((childTbl_limit) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { + if (childTbl_limit->type != cJSON_Number) { + printf("ERROR: failed to read json, childtable_limit\n"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblLimit = childTbl_limit->valueint; + } else { + g_Dbs.db[i].superTbls[j].childTblLimit = -1; // select ... limit -1 means all query result, drop = yes mean all table need recreate, limit value is invalid. + } + + cJSON* childTbl_offset = cJSON_GetObjectItem(stbInfo, "childtable_offset"); + if ((childTbl_offset) && (g_Dbs.db[i].drop != true) + && (g_Dbs.db[i].superTbls[j].childTblExists == TBL_ALREADY_EXISTS)) { + if ((childTbl_offset->type != cJSON_Number) + || (0 > childTbl_offset->valueint)) { + printf("ERROR: failed to read json, childtable_offset\n"); + goto PARSE_OVER; + } + g_Dbs.db[i].superTbls[j].childTblOffset = childTbl_offset->valueint; + } else { + g_Dbs.db[i].superTbls[j].childTblOffset = 0; + } + + cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); + if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, + ts->valuestring, TSDB_DB_NAME_LEN); + } else if (!ts) { + tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, + "now", TSDB_DB_NAME_LEN); + } else { + printf("ERROR: failed to read json, start_timestamp not found\n"); + goto PARSE_OVER; + } + + cJSON* timestampStep = cJSON_GetObjectItem(stbInfo, "timestamp_step"); + if (timestampStep && timestampStep->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].timeStampStep = timestampStep->valueint; + } else if (!timestampStep) { + g_Dbs.db[i].superTbls[j].timeStampStep = g_args.timestamp_step; + } else { + printf("ERROR: failed to read json, timestamp_step not found\n"); + goto PARSE_OVER; + } + + cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); + if (sampleFormat && sampleFormat->type + == cJSON_String && sampleFormat->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, + sampleFormat->valuestring, + min(SMALL_BUFF_LEN, + strlen(sampleFormat->valuestring) + 1)); + } else if (!sampleFormat) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", + SMALL_BUFF_LEN); + } else { + printf("ERROR: failed to read json, sample_format not found\n"); + goto PARSE_OVER; + } + + cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); + if (sampleFile && sampleFile->type == cJSON_String + && sampleFile->valuestring != NULL) { + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFile, + sampleFile->valuestring, + min(MAX_FILE_NAME_LEN, + strlen(sampleFile->valuestring) + 1)); + } else if (!sampleFile) { + memset(g_Dbs.db[i].superTbls[j].sampleFile, 0, + MAX_FILE_NAME_LEN); + } else { + printf("ERROR: failed to read json, sample_file not found\n"); + goto PARSE_OVER; + } + + cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); + if ((tagsFile && tagsFile->type == cJSON_String) + && (tagsFile->valuestring != NULL)) { + tstrncpy(g_Dbs.db[i].superTbls[j].tagsFile, + tagsFile->valuestring, MAX_FILE_NAME_LEN); + if (0 == g_Dbs.db[i].superTbls[j].tagsFile[0]) { + g_Dbs.db[i].superTbls[j].tagSource = 0; + } else { + g_Dbs.db[i].superTbls[j].tagSource = 1; + } + } else if (!tagsFile) { + memset(g_Dbs.db[i].superTbls[j].tagsFile, 0, 
MAX_FILE_NAME_LEN);
+          g_Dbs.db[i].superTbls[j].tagSource = 0;
+        } else {
+          printf("ERROR: failed to read json, tags_file not found\n");
+          goto PARSE_OVER;
+        }
+
+        cJSON* stbMaxSqlLen = cJSON_GetObjectItem(stbInfo, "max_sql_len");
+        if (stbMaxSqlLen && stbMaxSqlLen->type == cJSON_Number) {
+          int32_t len = stbMaxSqlLen->valueint;
+          if (len > TSDB_MAX_ALLOWED_SQL_LEN) {
+            len = TSDB_MAX_ALLOWED_SQL_LEN;
+          } else if (len < 5) {
+            len = 5;
+          }
+          g_Dbs.db[i].superTbls[j].maxSqlLen = len;
+        } else if (!stbMaxSqlLen) {
+          g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len;
+        } else {
+          errorPrint("%s() LN%d, failed to read json, stbMaxSqlLen input mistake\n",
+              __func__, __LINE__);
+          goto PARSE_OVER;
+        }
+        /*
+        cJSON *multiThreadWriteOneTbl =
+            cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes
+        if (multiThreadWriteOneTbl
+            && multiThreadWriteOneTbl->type == cJSON_String
+            && multiThreadWriteOneTbl->valuestring != NULL) {
+          if (0 == strncasecmp(multiThreadWriteOneTbl->valuestring, "yes", 3)) {
+            g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1;
+          } else {
+            g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+          }
+        } else if (!multiThreadWriteOneTbl) {
+          g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0;
+        } else {
+          printf("ERROR: failed to read json, multiThreadWriteOneTbl not found\n");
+          goto PARSE_OVER;
+        }
+        */
+        cJSON* insertRows = cJSON_GetObjectItem(stbInfo, "insert_rows");
+        if (insertRows && insertRows->type == cJSON_Number) {
+          if (insertRows->valueint < 0) {
+            errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
+                __func__, __LINE__);
+            goto PARSE_OVER;
+          }
+          g_Dbs.db[i].superTbls[j].insertRows = insertRows->valueint;
+        } else if (!insertRows) {
+          g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF;
+        } else {
+          errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n",
+              __func__, __LINE__);
+          goto PARSE_OVER;
+        }
+
+        cJSON* stbInterlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows");
+        if (stbInterlaceRows && stbInterlaceRows->type == cJSON_Number) {
+          if (stbInterlaceRows->valueint < 0) {
+            errorPrint("%s() LN%d, failed to read json, interlace rows input mistake\n",
+                __func__, __LINE__);
+            goto PARSE_OVER;
+          }
+          g_Dbs.db[i].superTbls[j].interlaceRows = stbInterlaceRows->valueint;
+
+          if (g_Dbs.db[i].superTbls[j].interlaceRows > g_Dbs.db[i].superTbls[j].insertRows) {
+            printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %u > insert_rows %"PRId64"\n\n",
+                i, j, g_Dbs.db[i].superTbls[j].interlaceRows,
+                g_Dbs.db[i].superTbls[j].insertRows);
+            printf(" interlace rows value will be set to insert_rows %"PRId64"\n\n",
+                g_Dbs.db[i].superTbls[j].insertRows);
+            prompt();
+            g_Dbs.db[i].superTbls[j].interlaceRows = g_Dbs.db[i].superTbls[j].insertRows;
+          }
+        } else if (!stbInterlaceRows) {
+          g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 means interlace mode. 
max value is less or equ num_of_records_per_req + } else { + errorPrint( + "%s() LN%d, failed to read json, interlace rows input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); + if (disorderRatio && disorderRatio->type == cJSON_Number) { + if (disorderRatio->valueint > 50) + disorderRatio->valueint = 50; + + if (disorderRatio->valueint < 0) + disorderRatio->valueint = 0; + + g_Dbs.db[i].superTbls[j].disorderRatio = disorderRatio->valueint; + } else if (!disorderRatio) { + g_Dbs.db[i].superTbls[j].disorderRatio = 0; + } else { + printf("ERROR: failed to read json, disorderRatio not found\n"); + goto PARSE_OVER; + } + + cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); + if (disorderRange && disorderRange->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].disorderRange = disorderRange->valueint; + } else if (!disorderRange) { + g_Dbs.db[i].superTbls[j].disorderRange = 1000; + } else { + printf("ERROR: failed to read json, disorderRange not found\n"); + goto PARSE_OVER; + } + + cJSON* insertInterval = cJSON_GetObjectItem(stbInfo, "insert_interval"); + if (insertInterval && insertInterval->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; + if (insertInterval->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } else if (!insertInterval) { + verbosePrint("%s() LN%d: stable insert interval be overrided by global %"PRIu64".\n", + __func__, __LINE__, g_args.insert_interval); + g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; + } else { + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + int retVal = getColumnAndTagTypeFromInsertJsonFile( + stbInfo, &g_Dbs.db[i].superTbls[j]); + if (false == retVal) { + goto PARSE_OVER; + } + } } - } - ret = true; + ret = true; PARSE_OVER: - return ret; + return ret; } static bool getMetaFromQueryJsonFile(cJSON* root) { - bool ret = false; + bool ret = false; - cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); - if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { - tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); - } + cJSON* cfgdir = cJSON_GetObjectItem(root, "cfgdir"); + if (cfgdir && cfgdir->type == cJSON_String && cfgdir->valuestring != NULL) { + tstrncpy(g_queryInfo.cfgDir, cfgdir->valuestring, MAX_FILE_NAME_LEN); + } - cJSON* host = cJSON_GetObjectItem(root, "host"); - if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); - } else if (!host) { - tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } else { - printf("ERROR: failed to read json, host not found\n"); - goto PARSE_OVER; - } - - cJSON* port = cJSON_GetObjectItem(root, "port"); - if (port && port->type == cJSON_Number) { - g_queryInfo.port = port->valueint; - } else if (!port) { - g_queryInfo.port = 6030; - } - - cJSON* user = cJSON_GetObjectItem(root, "user"); - if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); - } else if (!user) { - tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; - } - - cJSON* password = cJSON_GetObjectItem(root, "password"); - if (password && password->type == cJSON_String && password->valuestring != NULL) 
{ - tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE); - } else if (!password) { - tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);; - } - - cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt && answerPrompt->type == cJSON_String - && answerPrompt->valuestring != NULL) { - if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { - g_args.answer_yes = false; - } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { - g_args.answer_yes = true; + cJSON* host = cJSON_GetObjectItem(root, "host"); + if (host && host->type == cJSON_String && host->valuestring != NULL) { + tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); + } else if (!host) { + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { - g_args.answer_yes = false; - } - } else if (!answerPrompt) { - g_args.answer_yes = false; - } else { - printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); - goto PARSE_OVER; - } - - cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); - if (gQueryTimes && gQueryTimes->type == cJSON_Number) { - if (gQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - g_args.query_times = gQueryTimes->valueint; - } else if (!gQueryTimes) { - g_args.query_times = 1; - } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* dbs = cJSON_GetObjectItem(root, "databases"); - if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); - } else if (!dbs) { - printf("ERROR: failed to read json, databases not found\n"); - goto PARSE_OVER; - } - - cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); - if (queryMode && queryMode->type == cJSON_String && queryMode->valuestring != NULL) { - tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, MAX_TB_NAME_SIZE); - } else if (!queryMode) { - tstrncpy(g_queryInfo.queryMode, "taosc", MAX_TB_NAME_SIZE); - } else { - printf("ERROR: failed to read json, query_mode not found\n"); - goto PARSE_OVER; - } - - // specified_table_query - cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!specifiedQuery) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, super_table_query not found\n"); - goto PARSE_OVER; - } else { - cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); - if (queryInterval && queryInterval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; - } else if (!queryInterval) { - g_queryInfo.specifiedQueryInfo.queryInterval = 0; - } - - cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, - "query_times"); - if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { - if (specifiedQueryTimes->valueint <= 0) { - errorPrint( - "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, specifiedQueryTimes->valueint); + printf("ERROR: failed to read json, host not found\n"); goto PARSE_OVER; + } - } - g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; - } else if (!specifiedQueryTimes) { - 
g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; + cJSON* port = cJSON_GetObjectItem(root, "port"); + if (port && port->type == cJSON_Number) { + g_queryInfo.port = port->valueint; + } else if (!port) { + g_queryInfo.port = 6030; + } + + cJSON* user = cJSON_GetObjectItem(root, "user"); + if (user && user->type == cJSON_String && user->valuestring != NULL) { + tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); + } else if (!user) { + tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; + } + + cJSON* password = cJSON_GetObjectItem(root, "password"); + if (password && password->type == cJSON_String && password->valuestring != NULL) { + tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE); + } else if (!password) { + tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);; + } + + cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, + if (answerPrompt && answerPrompt->type == cJSON_String + && answerPrompt->valuestring != NULL) { + if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { + g_args.answer_yes = false; + } else if (0 == strncasecmp(answerPrompt->valuestring, "no", 2)) { + g_args.answer_yes = true; + } else { + g_args.answer_yes = false; + } + } else if (!answerPrompt) { + g_args.answer_yes = false; } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; + printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); + goto PARSE_OVER; } - cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - if (concurrent->valueint <= 0) { - errorPrint( - "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", - __func__, __LINE__, - g_queryInfo.specifiedQueryInfo.sqlCount, - g_queryInfo.specifiedQueryInfo.concurrent); + cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); + if (gQueryTimes && gQueryTimes->type == cJSON_Number) { + if (gQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_args.query_times = gQueryTimes->valueint; + } else if (!gQueryTimes) { + g_args.query_times = 1; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); goto PARSE_OVER; - } - g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; - } else if (!concurrent) { - g_queryInfo.specifiedQueryInfo.concurrent = 1; - } - - cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); - if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String - && specifiedAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { - g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); + } + + cJSON* dbs = cJSON_GetObjectItem(root, "databases"); + if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { + tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); + } else if (!dbs) { + printf("ERROR: failed to read json, databases not found\n"); goto PARSE_OVER; - } + } + + cJSON* queryMode = cJSON_GetObjectItem(root, "query_mode"); + if (queryMode + && queryMode->type == cJSON_String + && 
queryMode->valuestring != NULL) { + tstrncpy(g_queryInfo.queryMode, queryMode->valuestring, + min(SMALL_BUFF_LEN, strlen(queryMode->valuestring) + 1)); + } else if (!queryMode) { + tstrncpy(g_queryInfo.queryMode, "taosc", + min(SMALL_BUFF_LEN, strlen("taosc") + 1)); } else { - g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; - } - - cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); - if (interval && interval->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; - } else if (!interval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; - } - - cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); - if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", restart->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeRestart = false; - } else { - printf("ERROR: failed to read json, subscribe restart error\n"); + printf("ERROR: failed to read json, query_mode not found\n"); goto PARSE_OVER; - } - } else { - g_queryInfo.specifiedQueryInfo.subscribeRestart = true; - } - - cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); - if (keepProgress - && keepProgress->type == cJSON_String - && keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; - } else { - printf("ERROR: failed to read json, subscribe keepProgress error\n"); + } + + // specified_table_query + cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); + if (!specifiedQuery) { + g_queryInfo.specifiedQueryInfo.concurrent = 1; + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedQuery->type != cJSON_Object) { + printf("ERROR: failed to read json, super_table_query not found\n"); goto PARSE_OVER; - } } else { - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + cJSON* queryInterval = cJSON_GetObjectItem(specifiedQuery, "query_interval"); + if (queryInterval && queryInterval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.queryInterval = queryInterval->valueint; + } else if (!queryInterval) { + g_queryInfo.specifiedQueryInfo.queryInterval = 0; + } + + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, + "query_times"); + if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { + if (specifiedQueryTimes->valueint <= 0) { + errorPrint( + "%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, specifiedQueryTimes->valueint); + goto PARSE_OVER; + + } + g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; + } else if (!specifiedQueryTimes) { + g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); + if (concurrent && concurrent->type == cJSON_Number) { + if (concurrent->valueint <= 0) { + errorPrint( + "%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", + __func__, __LINE__, + 
g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); + goto PARSE_OVER; + } + g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + } else if (!concurrent) { + g_queryInfo.specifiedQueryInfo.concurrent = 1; + } + + cJSON* specifiedAsyncMode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (specifiedAsyncMode && specifiedAsyncMode->type == cJSON_String + && specifiedAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", specifiedAsyncMode->valuestring)) { + g_queryInfo.specifiedQueryInfo.asyncMode = ASYNC_MODE; + } else { + errorPrint("%s() LN%d, failed to read json, async mode input error\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.asyncMode = SYNC_MODE; + } + + cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); + if (interval && interval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; + } else if (!interval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; + } + + cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); + if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { + if (0 == strcmp("yes", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } else if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = false; + } else { + printf("ERROR: failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeRestart = true; + } + + cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); + if (keepProgress + && keepProgress->type == cJSON_String + && keepProgress->valuestring != NULL) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } else { + printf("ERROR: failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); + if (!specifiedSqls) { + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedSqls->type != cJSON_Array) { + errorPrint("%s() LN%d, failed to read json, super sqls not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(specifiedSqls); + if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent + > MAX_QUERY_SQL_COUNT) { + errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", + __func__, __LINE__, + superSqlSize, + g_queryInfo.specifiedQueryInfo.concurrent, + MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("ERROR: failed to read json, sql not found\n"); + goto PARSE_OVER; + } + 
tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], + sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + // default value is -1, which mean infinite loop + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + cJSON* endAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); + if (endAfterConsume + && endAfterConsume->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] + = endAfterConsume->valueint; + } + if (g_queryInfo.specifiedQueryInfo.endAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; + + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + cJSON* resubAfterConsume = + cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); + if ((resubAfterConsume) + && (resubAfterConsume->type == cJSON_Number) + && (resubAfterConsume->valueint >= 0)) { + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] + = resubAfterConsume->valueint; + } + + if (g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] < -1) + g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if ((NULL != result) && (result->type == cJSON_String) + && (result->valuestring != NULL)) { + tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], + result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.specifiedQueryInfo.result[j], + 0, MAX_FILE_NAME_LEN); + } else { + printf("ERROR: failed to read json, super query result file not found\n"); + goto PARSE_OVER; + } + } + } } - // sqls - cJSON* specifiedSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); - if (!specifiedSqls) { - g_queryInfo.specifiedQueryInfo.sqlCount = 0; - } else if (specifiedSqls->type != cJSON_Array) { - errorPrint("%s() LN%d, failed to read json, super sqls not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(specifiedSqls); - if (superSqlSize * g_queryInfo.specifiedQueryInfo.concurrent - > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql(%d) * concurrent(%d) overflow, max is %d\n", - __func__, __LINE__, - superSqlSize, - g_queryInfo.specifiedQueryInfo.concurrent, - MAX_QUERY_SQL_COUNT); + // super_table_query + cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); + if (!superQuery) { + g_queryInfo.superQueryInfo.threadCnt = 1; + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superQuery->type != cJSON_Object) { + printf("ERROR: failed to read json, sub_table_query not found\n"); + ret = true; goto PARSE_OVER; - } - - g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(specifiedSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], - sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); - - cJSON* endAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "endAfterConsume"); - if (endAfterConsume - && endAfterConsume->type == cJSON_Number) { - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] - = endAfterConsume->valueint; - } else if (!endAfterConsume) { - // default value is -1, which mean infinite loop - g_queryInfo.specifiedQueryInfo.endAfterConsume[j] = -1; - } - - cJSON* resubAfterConsume = - cJSON_GetObjectItem(specifiedQuery, "resubAfterConsume"); - if ((resubAfterConsume) - 
&& (resubAfterConsume->type == cJSON_Number) - && (resubAfterConsume->valueint >= 0)) { - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] - = resubAfterConsume->valueint; - } else if (!resubAfterConsume) { - // default value is -1, which mean do not resub - g_queryInfo.specifiedQueryInfo.resubAfterConsume[j] = -1; - } - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if ((NULL != result) && (result->type == cJSON_String) - && (result->valuestring != NULL)) { - tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.specifiedQueryInfo.result[j], - 0, MAX_FILE_NAME_LEN); + } else { + cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); + if (subrate && subrate->type == cJSON_Number) { + g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; + } else if (!subrate) { + g_queryInfo.superQueryInfo.queryInterval = 0; + } + + cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); + if (superQueryTimes && superQueryTimes->type == cJSON_Number) { + if (superQueryTimes->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", + __func__, __LINE__, superQueryTimes->valueint); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; + } else if (!superQueryTimes) { + g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; } else { - printf("ERROR: failed to read json, super query result file not found\n"); - goto PARSE_OVER; + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; } - } - } - } - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.threadCnt = 1; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, sub_table_query not found\n"); - ret = true; - goto PARSE_OVER; - } else { - cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.queryInterval = subrate->valueint; - } else if (!subrate) { - g_queryInfo.superQueryInfo.queryInterval = 0; - } - - cJSON* superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); - if (superQueryTimes && superQueryTimes->type == cJSON_Number) { - if (superQueryTimes->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, query_times: %"PRId64", need be a valid (>0) number\n", - __func__, __LINE__, superQueryTimes->valueint); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; - } else if (!superQueryTimes) { - g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; - } else { - errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; - } + cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); + if (threads && threads->type == cJSON_Number) { + if (threads->valueint <= 0) { + errorPrint("%s() LN%d, failed to read json, threads input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; - cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - if (threads->valueint <= 0) { - errorPrint("%s() LN%d, failed to read json, threads input mistake\n", - __func__, __LINE__); - goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.threadCnt = 
threads->valueint; + } else if (!threads) { + g_queryInfo.superQueryInfo.threadCnt = 1; + } - } - g_queryInfo.superQueryInfo.threadCnt = threads->valueint; - } else if (!threads) { - g_queryInfo.superQueryInfo.threadCnt = 1; - } + //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); + //if (subTblCnt && subTblCnt->type == cJSON_Number) { + // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; + //} else if (!subTblCnt) { + // g_queryInfo.superQueryInfo.childTblCount = 0; + //} + + cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); + if (stblname && stblname->type == cJSON_String + && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, + TSDB_TABLE_NAME_LEN); + } else { + errorPrint("%s() LN%d, failed to read json, super table name input error\n", + __func__, __LINE__); + goto PARSE_OVER; + } - //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); - //if (subTblCnt && subTblCnt->type == cJSON_Number) { - // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.superQueryInfo.childTblCount = 0; - //} + cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); + if (superAsyncMode && superAsyncMode->type == cJSON_String + && superAsyncMode->valuestring != NULL) { + if (0 == strcmp("sync", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } else if (0 == strcmp("async", superAsyncMode->valuestring)) { + g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; + } else { + errorPrint("%s() LN%d, failed to read json, async mode input error\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; + } - cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); - if (stblname && stblname->type == cJSON_String - && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, - MAX_TB_NAME_SIZE); - } else { - errorPrint("%s() LN%d, failed to read json, super table name input error\n", - __func__, __LINE__); - goto PARSE_OVER; - } - - cJSON* superAsyncMode = cJSON_GetObjectItem(superQuery, "mode"); - if (superAsyncMode && superAsyncMode->type == cJSON_String - && superAsyncMode->valuestring != NULL) { - if (0 == strcmp("sync", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } else if (0 == strcmp("async", superAsyncMode->valuestring)) { - g_queryInfo.superQueryInfo.asyncMode = ASYNC_MODE; - } else { - errorPrint("%s() LN%d, failed to read json, async mode input error\n", - __func__, __LINE__); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.asyncMode = SYNC_MODE; - } + cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); + if (superInterval && superInterval->type == cJSON_Number) { + if (superInterval->valueint < 0) { + errorPrint("%s() LN%d, failed to read json, interval input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } + g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; + } else if (!superInterval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto PARSE_OVER; + g_queryInfo.superQueryInfo.subscribeInterval = 10000; + } - cJSON* superInterval = cJSON_GetObjectItem(superQuery, "interval"); - if (superInterval && superInterval->type == cJSON_Number) { - if (superInterval->valueint < 0) { - errorPrint("%s() LN%d, failed to read json, interval input mistake\n", - 
__func__, __LINE__); - goto PARSE_OVER; - } - g_queryInfo.superQueryInfo.subscribeInterval = superInterval->valueint; - } else if (!superInterval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.superQueryInfo.subscribeInterval = 10000; - } - - cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String - && subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.superQueryInfo.subscribeRestart = false; - } else { - printf("ERROR: failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeRestart = true; - } - - cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); - if (superkeepProgress && - superkeepProgress->type == cJSON_String - && superkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", superkeepProgress->valuestring)) { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } else { - printf("ERROR: failed to read json, subscribe super table keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; - } + cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); + if (subrestart && subrestart->type == cJSON_String + && subrestart->valuestring != NULL) { + if (0 == strcmp("yes", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } else if (0 == strcmp("no", subrestart->valuestring)) { + g_queryInfo.superQueryInfo.subscribeRestart = false; + } else { + printf("ERROR: failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeRestart = true; + } + + cJSON* superkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + if (superkeepProgress && + superkeepProgress->type == cJSON_String + && superkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", superkeepProgress->valuestring)) { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } else { + printf("ERROR: failed to read json, subscribe super table keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; + } - cJSON* superEndAfterConsume = - cJSON_GetObjectItem(superQuery, "endAfterConsume"); - if (superEndAfterConsume - && superEndAfterConsume->type == cJSON_Number) { - g_queryInfo.superQueryInfo.endAfterConsume = - superEndAfterConsume->valueint; - } else if (!superEndAfterConsume) { // default value is -1, which mean do not resub g_queryInfo.superQueryInfo.endAfterConsume = -1; - } + cJSON* superEndAfterConsume = + cJSON_GetObjectItem(superQuery, "endAfterConsume"); + if (superEndAfterConsume + && superEndAfterConsume->type == cJSON_Number) { + g_queryInfo.superQueryInfo.endAfterConsume = + superEndAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.endAfterConsume < -1) + g_queryInfo.superQueryInfo.endAfterConsume = -1; - cJSON* superResubAfterConsume = - cJSON_GetObjectItem(superQuery, "resubAfterConsume"); - if ((superResubAfterConsume) - && (superResubAfterConsume->type == 
cJSON_Number) - && (superResubAfterConsume->valueint >= 0)) { - g_queryInfo.superQueryInfo.resubAfterConsume = - superResubAfterConsume->valueint; - } else if (!superResubAfterConsume) { // default value is -1, which mean do not resub g_queryInfo.superQueryInfo.resubAfterConsume = -1; - } + cJSON* superResubAfterConsume = + cJSON_GetObjectItem(superQuery, "resubAfterConsume"); + if ((superResubAfterConsume) + && (superResubAfterConsume->type == cJSON_Number) + && (superResubAfterConsume->valueint >= 0)) { + g_queryInfo.superQueryInfo.resubAfterConsume = + superResubAfterConsume->valueint; + } + if (g_queryInfo.superQueryInfo.resubAfterConsume < -1) + g_queryInfo.superQueryInfo.resubAfterConsume = -1; + + // supert table sqls + cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); + if (!superSqls) { + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superSqls->type != cJSON_Array) { + errorPrint("%s() LN%d: failed to read json, super sqls not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", + __func__, __LINE__, MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } - // supert table sqls - cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { - errorPrint("%s() LN%d: failed to read json, super sqls not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - errorPrint("%s() LN%d, failed to read json, query sql size overflow, max is %d\n", - __func__, __LINE__, MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.superQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(superSqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String - || sqlStr->valuestring == NULL) { - errorPrint("%s() LN%d, failed to read json, sql not found\n", - __func__, __LINE__); - goto PARSE_OVER; - } - tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, - MAX_QUERY_SQL_LENGTH); - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String - && result->valuestring != NULL){ - tstrncpy(g_queryInfo.superQueryInfo.result[j], - result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); - } else { - errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", - __func__, __LINE__); - goto PARSE_OVER; + g_queryInfo.superQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String + || sqlStr->valuestring == NULL) { + errorPrint("%s() LN%d, failed to read json, sql not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, + MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (result != NULL && result->type == cJSON_String + && result->valuestring != NULL){ + tstrncpy(g_queryInfo.superQueryInfo.result[j], + 
result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + errorPrint("%s() LN%d, failed to read json, sub query result file not found\n", + __func__, __LINE__); + goto PARSE_OVER; + } + } } - } } - } - ret = true; + ret = true; PARSE_OVER: - return ret; + return ret; } static bool getInfoFromJsonFile(char* file) { debugPrint("%s %d %s\n", __func__, __LINE__, file); - FILE *fp = fopen(file, "r"); - if (!fp) { - printf("failed to read %s, reason:%s\n", file, strerror(errno)); - return false; - } - - bool ret = false; - int maxLen = 6400000; - char *content = calloc(1, maxLen + 1); - int len = fread(content, 1, maxLen, fp); - if (len <= 0) { - free(content); - fclose(fp); - printf("failed to read %s, content is null", file); - return false; - } - - content[len] = 0; - cJSON* root = cJSON_Parse(content); - if (root == NULL) { - printf("ERROR: failed to cjson parse %s, invalid json format\n", file); - goto PARSE_OVER; - } - - cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); - if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { - if (0 == strcasecmp("insert", filetype->valuestring)) { - g_args.test_mode = INSERT_TEST; - } else if (0 == strcasecmp("query", filetype->valuestring)) { - g_args.test_mode = QUERY_TEST; - } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { - g_args.test_mode = SUBSCRIBE_TEST; + FILE *fp = fopen(file, "r"); + if (!fp) { + printf("failed to read %s, reason:%s\n", file, strerror(errno)); + return false; + } + + bool ret = false; + int maxLen = 6400000; + char *content = calloc(1, maxLen + 1); + int len = fread(content, 1, maxLen, fp); + if (len <= 0) { + free(content); + fclose(fp); + printf("failed to read %s, content is null", file); + return false; + } + + content[len] = 0; + cJSON* root = cJSON_Parse(content); + if (root == NULL) { + printf("ERROR: failed to cjson parse %s, invalid json format\n", file); + goto PARSE_OVER; + } + + cJSON* filetype = cJSON_GetObjectItem(root, "filetype"); + if (filetype && filetype->type == cJSON_String && filetype->valuestring != NULL) { + if (0 == strcasecmp("insert", filetype->valuestring)) { + g_args.test_mode = INSERT_TEST; + } else if (0 == strcasecmp("query", filetype->valuestring)) { + g_args.test_mode = QUERY_TEST; + } else if (0 == strcasecmp("subscribe", filetype->valuestring)) { + g_args.test_mode = SUBSCRIBE_TEST; + } else { + printf("ERROR: failed to read json, filetype not support\n"); + goto PARSE_OVER; + } + } else if (!filetype) { + g_args.test_mode = INSERT_TEST; } else { - printf("ERROR: failed to read json, filetype not support\n"); - goto PARSE_OVER; + printf("ERROR: failed to read json, filetype not found\n"); + goto PARSE_OVER; + } + + if (INSERT_TEST == g_args.test_mode) { + ret = getMetaFromInsertJsonFile(root); + } else if ((QUERY_TEST == g_args.test_mode) + || (SUBSCRIBE_TEST == g_args.test_mode)) { + ret = getMetaFromQueryJsonFile(root); + } else { + errorPrint("%s() LN%d, input json file type error! 
please input correct file type: insert or query or subscribe\n", + __func__, __LINE__); + goto PARSE_OVER; } - } else if (!filetype) { - g_args.test_mode = INSERT_TEST; - } else { - printf("ERROR: failed to read json, filetype not found\n"); - goto PARSE_OVER; - } - - if (INSERT_TEST == g_args.test_mode) { - ret = getMetaFromInsertJsonFile(root); - } else if ((QUERY_TEST == g_args.test_mode) - || (SUBSCRIBE_TEST == g_args.test_mode)) { - ret = getMetaFromQueryJsonFile(root); - } else { - errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", - __func__, __LINE__); - goto PARSE_OVER; - } PARSE_OVER: - free(content); - cJSON_Delete(root); - fclose(fp); - return ret; + free(content); + cJSON_Delete(root); + fclose(fp); + return ret; } -static void prepareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { - (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]); - } +static int prepareSampleData() { + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { + if (readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]) != 0) { + return -1; + } + } + } } - } + + return 0; } static void postFreeResource() { - tmfclose(g_fpOfInsertResult); - for (int i = 0; i < g_Dbs.dbCount; i++) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { - free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); - g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { - free(g_Dbs.db[i].superTbls[j].sampleDataBuf); - g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { - free(g_Dbs.db[i].superTbls[j].tagDataBuf); - g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; - } - if (0 != g_Dbs.db[i].superTbls[j].childTblName) { - free(g_Dbs.db[i].superTbls[j].childTblName); - g_Dbs.db[i].superTbls[j].childTblName = NULL; - } - } - } + tmfclose(g_fpOfInsertResult); + for (int i = 0; i < g_Dbs.dbCount; i++) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { + free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); + g_Dbs.db[i].superTbls[j].colsOfCreateChildTable = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].sampleDataBuf) { + free(g_Dbs.db[i].superTbls[j].sampleDataBuf); + g_Dbs.db[i].superTbls[j].sampleDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].tagDataBuf) { + free(g_Dbs.db[i].superTbls[j].tagDataBuf); + g_Dbs.db[i].superTbls[j].tagDataBuf = NULL; + } + if (0 != g_Dbs.db[i].superTbls[j].childTblName) { + free(g_Dbs.db[i].superTbls[j].childTblName); + g_Dbs.db[i].superTbls[j].childTblName = NULL; + } + } + } } static int getRowDataFromSample( char* dataBuf, int64_t maxLen, int64_t timestamp, - SSuperTable* superTblInfo, int64_t* sampleUsePos) { - if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { -/* int ret = readSampleFromCsvFileToMem(superTblInfo); - if (0 != ret) { - tmfree(superTblInfo->sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; + SSuperTable* superTblInfo, int64_t* sampleUsePos) +{ + if ((*sampleUsePos) == MAX_SAMPLES_ONCE_FROM_FILE) { + /* int ret = readSampleFromCsvFileToMem(superTblInfo); + if (0 != ret) { + tmfree(superTblInfo->sampleDataBuf); + superTblInfo->sampleDataBuf = NULL; 
+ return -1; + } + */ + *sampleUsePos = 0; } -*/ - *sampleUsePos = 0; - } - int dataLen = 0; + int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "(%" PRId64 ", ", timestamp); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, - "%s", superTblInfo->sampleDataBuf + superTblInfo->lenOfOneRow * (*sampleUsePos)); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + "(%" PRId64 ", ", timestamp); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + "%s", + superTblInfo->sampleDataBuf + + superTblInfo->lenOfOneRow * (*sampleUsePos)); + dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); - (*sampleUsePos)++; + (*sampleUsePos)++; - return dataLen; + return dataLen; } static int64_t generateStbRowData( SSuperTable* stbInfo, char* recBuf, int64_t timestamp) { - int64_t dataLen = 0; - char *pstr = recBuf; - int64_t maxLen = MAX_DATA_SIZE; - - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "(%" PRId64 ",", timestamp); - - for (int i = 0; i < stbInfo->columnCount; i++) { - if ((0 == strncasecmp(stbInfo->columns[i].dataType, - "BINARY", strlen("BINARY"))) - || (0 == strncasecmp(stbInfo->columns[i].dataType, - "NCHAR", strlen("NCHAR")))) { - if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { - errorPrint( "binary or nchar length overflow, max size:%u\n", - (uint32_t)TSDB_MAX_BINARY_LEN); - return -1; - } + int64_t dataLen = 0; + char *pstr = recBuf; + int64_t maxLen = MAX_DATA_SIZE; + + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "(%" PRId64 ",", timestamp); + + for (int i = 0; i < stbInfo->columnCount; i++) { + if ((0 == strncasecmp(stbInfo->columns[i].dataType, + "BINARY", strlen("BINARY"))) + || (0 == strncasecmp(stbInfo->columns[i].dataType, + "NCHAR", strlen("NCHAR")))) { + if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { + errorPrint( "binary or nchar length overflow, max size:%u\n", + (uint32_t)TSDB_MAX_BINARY_LEN); + return -1; + } - char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); - if (NULL == buf) { - errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); - return -1; - } - rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); - tmfree(buf); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "INT", strlen("INT"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d,", rand_int()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BIGINT", strlen("BIGINT"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%"PRId64",", rand_bigint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "FLOAT", strlen("FLOAT"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%f,", rand_float()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "DOUBLE", strlen("DOUBLE"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%f,", rand_double()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "SMALLINT", strlen("SMALLINT"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d,", rand_smallint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TINYINT", strlen("TINYINT"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d,", rand_tinyint()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "BOOL", strlen("BOOL"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%d,", rand_bool()); - } else if (0 == strncasecmp(stbInfo->columns[i].dataType, - "TIMESTAMP", strlen("TIMESTAMP"))) { - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, - "%"PRId64",", rand_bigint()); - } else { - errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); - return -1; + char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); + if (NULL == buf) { + errorPrint( "calloc failed! 
size:%d\n", stbInfo->columns[i].dataLen); + return -1; + } + rand_string(buf, stbInfo->columns[i].dataLen); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); + tmfree(buf); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "INT", strlen("INT"))) { + if ((g_args.demo_mode) && (i == 1)) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d,", demo_voltage_int()); + } else { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d,", rand_int()); + } + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "BIGINT", strlen("BIGINT"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%"PRId64",", rand_bigint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "FLOAT", strlen("FLOAT"))) { + if (g_args.demo_mode) { + if (i == 0) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%f,", demo_current_float()); + } else { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%f,", demo_phase_float()); + } + } else { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%f,", rand_float()); + } + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "DOUBLE", strlen("DOUBLE"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%f,", rand_double()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "SMALLINT", strlen("SMALLINT"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d,", rand_smallint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "TINYINT", strlen("TINYINT"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d,", rand_tinyint()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "BOOL", strlen("BOOL"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%d,", rand_bool()); + } else if (0 == strncasecmp(stbInfo->columns[i].dataType, + "TIMESTAMP", strlen("TIMESTAMP"))) { + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, + "%"PRId64",", rand_bigint()); + } else { + errorPrint( "Not support data type: %s\n", stbInfo->columns[i].dataType); + return -1; + } } - } - dataLen -= 1; - dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); + dataLen -= 1; + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + verbosePrint("%s() LN%d, dataLen:%"PRId64"\n", __func__, __LINE__, dataLen); + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - return strlen(recBuf); + return strlen(recBuf); } static int64_t generateData(char *recBuf, char **data_type, int64_t timestamp, int lenOfBinary) { - memset(recBuf, 0, MAX_DATA_SIZE); - char *pstr = recBuf; - pstr += sprintf(pstr, "(%" PRId64, timestamp); - - int columnCount = g_args.num_of_CPR; - - for (int i = 0; i < columnCount; i++) { - if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) { - pstr += sprintf(pstr, ",%d", rand_tinyint() ); - } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) { - pstr += sprintf(pstr, ",%d", rand_smallint()); - } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { - pstr += sprintf(pstr, ",%d", rand_int()); - } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { - pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); - } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { - pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); - } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { - pstr += sprintf(pstr, ",%10.4f", rand_float()); - } else if 
(strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { - double t = rand_double(); - pstr += sprintf(pstr, ",%20.8f", t); - } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) { - bool b = rand_bool() & 1; - pstr += sprintf(pstr, ",%s", b ? "true" : "false"); - } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { - char *s = malloc(lenOfBinary); - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { - char *s = malloc(lenOfBinary); - rand_string(s, lenOfBinary); - pstr += sprintf(pstr, ",\"%s\"", s); - free(s); - } - - if (strlen(recBuf) > MAX_DATA_SIZE) { - perror("column length too long, abort"); - exit(-1); - } - } - - pstr += sprintf(pstr, ")"); - - verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); - - return (int32_t)strlen(recBuf); + memset(recBuf, 0, MAX_DATA_SIZE); + char *pstr = recBuf; + pstr += sprintf(pstr, "(%" PRId64, timestamp); + + int columnCount = g_args.num_of_CPR; + + for (int i = 0; i < columnCount; i++) { + if (strcasecmp(data_type[i % columnCount], "TINYINT") == 0) { + pstr += sprintf(pstr, ",%d", rand_tinyint() ); + } else if (strcasecmp(data_type[i % columnCount], "SMALLINT") == 0) { + pstr += sprintf(pstr, ",%d", rand_smallint()); + } else if (strcasecmp(data_type[i % columnCount], "INT") == 0) { + pstr += sprintf(pstr, ",%d", rand_int()); + } else if (strcasecmp(data_type[i % columnCount], "BIGINT") == 0) { + pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + } else if (strcasecmp(data_type[i % columnCount], "TIMESTAMP") == 0) { + pstr += sprintf(pstr, ",%" PRId64, rand_bigint()); + } else if (strcasecmp(data_type[i % columnCount], "FLOAT") == 0) { + pstr += sprintf(pstr, ",%10.4f", rand_float()); + } else if (strcasecmp(data_type[i % columnCount], "DOUBLE") == 0) { + double t = rand_double(); + pstr += sprintf(pstr, ",%20.8f", t); + } else if (strcasecmp(data_type[i % columnCount], "BOOL") == 0) { + bool b = rand_bool() & 1; + pstr += sprintf(pstr, ",%s", b ? 
"true" : "false"); + } else if (strcasecmp(data_type[i % columnCount], "BINARY") == 0) { + char *s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(-1); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + } else if (strcasecmp(data_type[i % columnCount], "NCHAR") == 0) { + char *s = malloc(lenOfBinary + 1); + if (s == NULL) { + errorPrint("%s() LN%d, memory allocation %d bytes failed\n", + __func__, __LINE__, lenOfBinary + 1); + exit(-1); + } + rand_string(s, lenOfBinary); + pstr += sprintf(pstr, ",\"%s\"", s); + free(s); + } + + if (strlen(recBuf) > MAX_DATA_SIZE) { + perror("column length too long, abort"); + exit(-1); + } + } + + pstr += sprintf(pstr, ")"); + + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + + return (int32_t)strlen(recBuf); } static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { - char* sampleDataBuf = NULL; + char* sampleDataBuf = NULL; - sampleDataBuf = calloc( + sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); - if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, - strerror(errno)); - return -1; - } - - superTblInfo->sampleDataBuf = sampleDataBuf; - int ret = readSampleFromCsvFileToMem(superTblInfo); - - if (0 != ret) { - errorPrint("%s() LN%d, read sample from csv file failed.\n", - __func__, __LINE__); - tmfree(sampleDataBuf); - superTblInfo->sampleDataBuf = NULL; - return -1; - } - - return 0; + if (sampleDataBuf == NULL) { + errorPrint("%s() LN%d, Failed to calloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + strerror(errno)); + return -1; + } + + superTblInfo->sampleDataBuf = sampleDataBuf; + int ret = readSampleFromCsvFileToMem(superTblInfo); + + if (0 != ret) { + errorPrint("%s() LN%d, read sample from csv file failed.\n", + __func__, __LINE__); + tmfree(sampleDataBuf); + superTblInfo->sampleDataBuf = NULL; + return -1; + } + + return 0; } static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) @@ -4927,13 +5119,17 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) uint16_t iface; if (superTblInfo) iface = superTblInfo->iface; - else - iface = g_args.iface; + else { + if (g_args.iface == INTERFACE_BUT) + iface = TAOSC_IFACE; + else + iface = g_args.iface; + } debugPrint("[%d] %s() LN%d %s\n", pThreadInfo->threadID, __func__, __LINE__, - (g_args.iface==TAOSC_IFACE)? - "taosc":(g_args.iface==REST_IFACE)?"rest":"stmt"); + (iface==TAOSC_IFACE)? + "taosc":(iface==REST_IFACE)?"rest":"stmt"); switch(iface) { case TAOSC_IFACE: @@ -4955,10 +5151,13 @@ static int32_t execInsert(threadInfo *pThreadInfo, uint32_t k) #if STMT_IFACE_ENABLED == 1 case STMT_IFACE: - debugPrint("%s() LN%d, stmt=%p", __func__, __LINE__, pThreadInfo->stmt); + debugPrint("%s() LN%d, stmt=%p", + __func__, __LINE__, pThreadInfo->stmt); if (0 != taos_stmt_execute(pThreadInfo->stmt)) { - errorPrint("%s() LN%d, failied to execute insert statement\n", - __func__, __LINE__); + errorPrint("%s() LN%d, failied to execute insert statement. reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(pThreadInfo->stmt)); + + fprintf(stderr, "\n\033[31m === Please reduce batch number if WAL size exceeds limit. 
===\033[0m\n\n"); exit(-1); } affectedRows = k; @@ -5008,48 +5207,54 @@ static int32_t generateDataTailWithoutStb( uint64_t recordFrom, int64_t startTime, /* int64_t *pSamplePos, */int64_t *dataLen) { - uint64_t len = 0; - char *pstr = buffer; + uint64_t len = 0; + char *pstr = buffer; - verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); + verbosePrint("%s() LN%d batch=%d\n", __func__, __LINE__, batch); - int32_t k = 0; - for (k = 0; k < batch;) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); + int32_t k = 0; + for (k = 0; k < batch;) { + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); - int64_t retLen = 0; + int64_t retLen = 0; - char **data_type = g_args.datatype; - int lenOfBinary = g_args.len_of_binary; + char **data_type = g_args.datatype; + int lenOfBinary = g_args.len_of_binary; - retLen = generateData(data, data_type, - startTime + getTSRandTail( - (int64_t) DEFAULT_TIMESTAMP_STEP, k, - g_args.disorderRatio, - g_args.disorderRange), - lenOfBinary); + if (g_args.disorderRatio) { + retLen = generateData(data, data_type, + startTime + getTSRandTail( + g_args.timestamp_step, k, + g_args.disorderRatio, + g_args.disorderRange), + lenOfBinary); + } else { + retLen = generateData(data, data_type, + startTime + g_args.timestamp_step * k, + lenOfBinary); + } - if (len > remainderBufLen) - break; + if (len > remainderBufLen) + break; - pstr += sprintf(pstr, "%s", data); - k++; - len += retLen; - remainderBufLen -= retLen; + pstr += sprintf(pstr, "%s", data); + k++; + len += retLen; + remainderBufLen -= retLen; - verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); + verbosePrint("%s() LN%d len=%"PRIu64" k=%d \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); - recordFrom ++; + recordFrom ++; - if (recordFrom >= insertRows) { - break; + if (recordFrom >= insertRows) { + break; + } } - } - *dataLen = len; - return k; + *dataLen = len; + return k; } static int64_t getTSRandTail(int64_t timeStampStep, int32_t seq, @@ -5074,62 +5279,69 @@ static int32_t generateStbDataTail( int64_t remainderBufLen, int64_t insertRows, uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, int64_t *dataLen) { - uint64_t len = 0; + uint64_t len = 0; - char *pstr = buffer; + char *pstr = buffer; - bool tsRand; - if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { - tsRand = true; - } else { - tsRand = false; - } - verbosePrint("%s() LN%d batch=%u\n", __func__, __LINE__, batch); + bool tsRand; + if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { + tsRand = true; + } else { + tsRand = false; + } + verbosePrint("%s() LN%d batch=%u buflen=%"PRId64"\n", + __func__, __LINE__, batch, remainderBufLen); - int32_t k = 0; - for (k = 0; k < batch;) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); + int32_t k; + for (k = 0; k < batch;) { + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); - int64_t retLen = 0; + int64_t lenOfRow = 0; - if (tsRand) { - retLen = generateStbRowData(superTblInfo, data, - startTime + getTSRandTail( - superTblInfo->timeStampStep, k, - superTblInfo->disorderRatio, - superTblInfo->disorderRange) - ); - } else { - retLen = getRowDataFromSample( - data, - remainderBufLen, - startTime + superTblInfo->timeStampStep * k, - superTblInfo, - pSamplePos); - } + if (tsRand) { + if (superTblInfo->disorderRatio > 0) { + lenOfRow = generateStbRowData(superTblInfo, data, + startTime + getTSRandTail( + superTblInfo->timeStampStep, k, + 
superTblInfo->disorderRatio, + superTblInfo->disorderRange) + ); + } else { + lenOfRow = generateStbRowData(superTblInfo, data, + startTime + superTblInfo->timeStampStep * k + ); + } + } else { + lenOfRow = getRowDataFromSample( + data, + (remainderBufLen < MAX_DATA_SIZE)?remainderBufLen:MAX_DATA_SIZE, + startTime + superTblInfo->timeStampStep * k, + superTblInfo, + pSamplePos); + } - if (retLen > remainderBufLen) { - break; - } + if ((lenOfRow + 1) > remainderBufLen) { + break; + } - pstr += snprintf(pstr , retLen + 1, "%s", data); - k++; - len += retLen; - remainderBufLen -= retLen; + pstr += snprintf(pstr , lenOfRow + 1, "%s", data); + k++; + len += lenOfRow; + remainderBufLen -= lenOfRow; - verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", - __func__, __LINE__, len, k, buffer); + verbosePrint("%s() LN%d len=%"PRIu64" k=%u \nbuffer=%s\n", + __func__, __LINE__, len, k, buffer); - recordFrom ++; + recordFrom ++; - if (recordFrom >= insertRows) { - break; + if (recordFrom >= insertRows) { + break; + } } - } - *dataLen = len; - return k; + *dataLen = len; + return k; } @@ -5137,82 +5349,82 @@ static int generateSQLHeadWithoutStb(char *tableName, char *dbName, char *buffer, int remainderBufLen) { - int len; + int len; - char headBuf[HEAD_BUFF_LEN]; + char headBuf[HEAD_BUFF_LEN]; - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); - if (len > remainderBufLen) - return -1; + if (len > remainderBufLen) + return -1; - tstrncpy(buffer, headBuf, len + 1); + tstrncpy(buffer, headBuf, len + 1); - return len; + return len; } static int generateStbSQLHead( SSuperTable* superTblInfo, - char *tableName, int32_t tableSeq, + char *tableName, int64_t tableSeq, char *dbName, char *buffer, int remainderBufLen) { - int len; + int len; - char headBuf[HEAD_BUFF_LEN]; + char headBuf[HEAD_BUFF_LEN]; - if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); - } else { + if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagValuesForStb(superTblInfo, tableSeq); + } else { tagsValBuf = getTagValueFromTagSample( superTblInfo, tableSeq % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { - errorPrint("%s() LN%d, tag buf failed to allocate memory\n", - __func__, __LINE__); - return -1; - } - - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s using %s.%s tags %s values", - dbName, - tableName, - dbName, - superTblInfo->sTblName, - tagsValBuf); - tmfree(tagsValBuf); + } + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } + + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s using %s.%s TAGS%s values", + dbName, + tableName, + dbName, + superTblInfo->sTblName, + tagsValBuf); + tmfree(tagsValBuf); } else if (TBL_ALREADY_EXISTS == superTblInfo->childTblExists) { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); } else { - len = snprintf( - headBuf, - HEAD_BUFF_LEN, - "%s.%s values", - dbName, - tableName); - } - - if (len > remainderBufLen) - return -1; + len = snprintf( + headBuf, + HEAD_BUFF_LEN, + "%s.%s values", + dbName, + tableName); + } + + if 
(len > remainderBufLen) + return -1; - tstrncpy(buffer, headBuf, len + 1); + tstrncpy(buffer, headBuf, len + 1); - return len; + return len; } static int32_t generateStbInterlaceData( @@ -5226,52 +5438,52 @@ static int32_t generateStbInterlaceData( int64_t startTime, uint64_t *pRemainderBufLen) { - assert(buffer); - char *pstr = buffer; + assert(buffer); + char *pstr = buffer; - int headLen = generateStbSQLHead( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, - pstr, *pRemainderBufLen); + int headLen = generateStbSQLHead( + superTblInfo, + tableName, tableSeq, pThreadInfo->db_name, + pstr, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } - // generate data buffer - verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", + if (headLen <= 0) { + return 0; + } + // generate data buffer + verbosePrint("[%d] %s() LN%d i=%"PRIu64" buffer:\n%s\n", pThreadInfo->threadID, __func__, __LINE__, i, buffer); - pstr += headLen; - *pRemainderBufLen -= headLen; + pstr += headLen; + *pRemainderBufLen -= headLen; - int64_t dataLen = 0; + int64_t dataLen = 0; - verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", + verbosePrint("[%d] %s() LN%d i=%"PRIu64" batchPerTblTimes=%u batchPerTbl = %u\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - startTime = taosGetTimestamp(pThreadInfo->time_precision); - } + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(pThreadInfo->time_precision); + } - int32_t k = generateStbDataTail( + int32_t k = generateStbDataTail( superTblInfo, batchPerTbl, pstr, *pRemainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); - if (k == batchPerTbl) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", - __func__, __LINE__, k, batchPerTbl); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; + if (k == batchPerTbl) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + debugPrint("%s() LN%d, generated data tail: %u, not equal batch per table: %u\n", + __func__, __LINE__, k, batchPerTbl); + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } + + return k; } static int64_t generateInterlaceDataWithoutStb( @@ -5282,44 +5494,44 @@ static int64_t generateInterlaceDataWithoutStb( int64_t startTime, uint64_t *pRemainderBufLen) { - assert(buffer); - char *pstr = buffer; + assert(buffer); + char *pstr = buffer; - int headLen = generateSQLHeadWithoutStb( - tableName, dbName, + int headLen = generateSQLHeadWithoutStb( + tableName, dbName, pstr, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } + if (headLen <= 0) { + return 0; + } - pstr += headLen; - *pRemainderBufLen -= headLen; + pstr += headLen; + *pRemainderBufLen -= headLen; - int64_t dataLen = 0; + int64_t dataLen = 0; - int32_t k = generateDataTailWithoutStb( + int32_t k = generateDataTailWithoutStb( batch, pstr, *pRemainderBufLen, insertRows, 0, startTime, &dataLen); - if (k == batch) { - pstr += dataLen; - *pRemainderBufLen -= dataLen; - } else { - debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n", - __func__, __LINE__, k, batch); - pstr -= headLen; - pstr[0] = '\0'; - k = 0; - } - - return k; + if (k == batch) { + pstr += dataLen; + *pRemainderBufLen -= dataLen; + } else { + debugPrint("%s() LN%d, generated data tail: %d, not equal batch per table: %u\n", + __func__, 
__LINE__, k, batch); + pstr -= headLen; + pstr[0] = '\0'; + k = 0; + } + + return k; } #if STMT_IFACE_ENABLED == 1 static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, - char *dataType, int32_t dataLen, char **ptr) + char *dataType, int32_t dataLen, char **ptr, char *value) { if (0 == strncasecmp(dataType, "BINARY", strlen("BINARY"))) { @@ -5329,12 +5541,18 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, return -1; } char *bind_binary = (char *)*ptr; - rand_string(bind_binary, dataLen); bind->buffer_type = TSDB_DATA_TYPE_BINARY; - bind->buffer_length = dataLen; - bind->buffer = bind_binary; + if (value) { + strncpy(bind_binary, value, strlen(value)); + bind->buffer_length = strlen(bind_binary); + } else { + rand_string(bind_binary, dataLen); + bind->buffer_length = dataLen; + } + bind->length = &bind->buffer_length; + bind->buffer = bind_binary; bind->is_null = NULL; *ptr += bind->buffer_length; @@ -5346,9 +5564,14 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, return -1; } char *bind_nchar = (char *)*ptr; - rand_string(bind_nchar, dataLen); bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + if (value) { + strncpy(bind_nchar, value, strlen(value)); + } else { + rand_string(bind_nchar, dataLen); + } + bind->buffer_length = strlen(bind_nchar); bind->buffer = bind_nchar; bind->length = &bind->buffer_length; @@ -5359,7 +5582,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "INT", strlen("INT"))) { int32_t *bind_int = (int32_t *)*ptr; - *bind_int = rand_int(); + if (value) { + *bind_int = atoi(value); + } else { + *bind_int = rand_int(); + } bind->buffer_type = TSDB_DATA_TYPE_INT; bind->buffer_length = sizeof(int32_t); bind->buffer = bind_int; @@ -5371,7 +5598,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "BIGINT", strlen("BIGINT"))) { int64_t *bind_bigint = (int64_t *)*ptr; - *bind_bigint = rand_bigint(); + if (value) { + *bind_bigint = atoll(value); + } else { + *bind_bigint = rand_bigint(); + } bind->buffer_type = TSDB_DATA_TYPE_BIGINT; bind->buffer_length = sizeof(int64_t); bind->buffer = bind_bigint; @@ -5383,7 +5614,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "FLOAT", strlen("FLOAT"))) { float *bind_float = (float *) *ptr; - *bind_float = rand_float(); + if (value) { + *bind_float = (float)atof(value); + } else { + *bind_float = rand_float(); + } bind->buffer_type = TSDB_DATA_TYPE_FLOAT; bind->buffer_length = sizeof(float); bind->buffer = bind_float; @@ -5395,7 +5630,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "DOUBLE", strlen("DOUBLE"))) { double *bind_double = (double *)*ptr; - *bind_double = rand_double(); + if (value) { + *bind_double = atof(value); + } else { + *bind_double = rand_double(); + } bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; bind->buffer_length = sizeof(double); bind->buffer = bind_double; @@ -5407,7 +5646,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "SMALLINT", strlen("SMALLINT"))) { int16_t *bind_smallint = (int16_t *)*ptr; - *bind_smallint = rand_smallint(); + if (value) { + *bind_smallint = (int16_t)atoi(value); + } else { + *bind_smallint = rand_smallint(); + } bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; bind->buffer_length = sizeof(int16_t); bind->buffer = bind_smallint; @@ -5419,7 +5662,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "TINYINT", strlen("TINYINT"))) { int8_t *bind_tinyint = (int8_t *)*ptr; - *bind_tinyint = rand_tinyint(); + if (value) { + *bind_tinyint = (int8_t)atoi(value); + } else { + *bind_tinyint = 
rand_tinyint(); + } bind->buffer_type = TSDB_DATA_TYPE_TINYINT; bind->buffer_length = sizeof(int8_t); bind->buffer = bind_tinyint; @@ -5442,7 +5689,11 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, "TIMESTAMP", strlen("TIMESTAMP"))) { int64_t *bind_ts2 = (int64_t *) *ptr; - *bind_ts2 = rand_bigint(); + if (value) { + *bind_ts2 = atoll(value); + } else { + *bind_ts2 = rand_bigint(); + } bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; bind->buffer_length = sizeof(int64_t); bind->buffer = bind_ts2; @@ -5451,8 +5702,7 @@ static int32_t prepareStmtBindArrayByType(TAOS_BIND *bind, *ptr += bind->buffer_length; } else { - errorPrint( "No support data type: %s\n", - dataType); + errorPrint( "No support data type: %s\n", dataType); return -1; } @@ -5470,7 +5720,7 @@ static int32_t prepareStmtWithoutStb( int ret = taos_stmt_set_tbname(stmt, tableName); if (ret != 0) { errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. reason: %s\n", - tableName, ret, taos_errstr(NULL)); + tableName, ret, taos_stmt_errstr(stmt)); return ret; } @@ -5496,10 +5746,15 @@ static int32_t prepareStmtWithoutStb( bind_ts = (int64_t *)ptr; bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; - *bind_ts = startTime + getTSRandTail( - (int64_t)DEFAULT_TIMESTAMP_STEP, k, - g_args.disorderRatio, - g_args.disorderRange); + + if (g_args.disorderRatio) { + *bind_ts = startTime + getTSRandTail( + g_args.timestamp_step, k, + g_args.disorderRatio, + g_args.disorderRange); + } else { + *bind_ts = startTime + g_args.timestamp_step * k; + } bind->buffer_length = sizeof(int64_t); bind->buffer = bind_ts; bind->length = &bind->buffer_length; @@ -5508,18 +5763,27 @@ static int32_t prepareStmtWithoutStb( ptr += bind->buffer_length; for (int i = 0; i < g_args.num_of_CPR; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); + bind = (TAOS_BIND *)((char *)bindArray + + (sizeof(TAOS_BIND) * (i + 1))); if ( -1 == prepareStmtBindArrayByType( bind, data_type[i], g_args.len_of_binary, - &ptr)) { + &ptr, NULL)) { return -1; } } - taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { + errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + break; + } // if msg > 3MB, break - taos_stmt_add_batch(stmt); + if (0 != taos_stmt_add_batch(stmt)) { + errorPrint("%s() LN%d, stmt_add_batch() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + break; + } k++; recordFrom ++; @@ -5532,78 +5796,220 @@ static int32_t prepareStmtWithoutStb( return k; } -static int32_t prepareStbStmt(SSuperTable *stbInfo, - TAOS_STMT *stmt, - char *tableName, uint32_t batch, - uint64_t insertRows, - uint64_t recordFrom, - int64_t startTime, char *buffer) +static int32_t prepareStbStmtBind( + char *bindArray, SSuperTable *stbInfo, bool sourceRand, + int64_t startTime, int32_t recSeq, + bool isColumn) { - int ret = taos_stmt_set_tbname(stmt, tableName); - if (ret != 0) { - errorPrint("failed to execute taos_stmt_set_tbname(%s). return 0x%x. 
reason: %s\n", - tableName, ret, taos_errstr(NULL)); - return ret; + char *bindBuffer = calloc(1, g_args.len_of_binary); + if (bindBuffer == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind buffer\n", + __func__, __LINE__, g_args.len_of_binary); + return -1; } - char *bindArray = malloc(sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); - if (bindArray == NULL) { - errorPrint("Failed to allocate %d bind params\n", - (stbInfo->columnCount + 1)); - return -1; + char data[MAX_DATA_SIZE]; + memset(data, 0, MAX_DATA_SIZE); + char *ptr = data; + + TAOS_BIND *bind; + + if (isColumn) { + int cursor = 0; + + for (int i = 0; i < stbInfo->columnCount + 1; i ++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + + if (i == 0) { + int64_t *bind_ts; + + bind_ts = (int64_t *)ptr; + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + if (stbInfo->disorderRatio) { + *bind_ts = startTime + getTSRandTail( + stbInfo->timeStampStep, recSeq, + stbInfo->disorderRatio, + stbInfo->disorderRange); + } else { + *bind_ts = startTime + stbInfo->timeStampStep * recSeq; + } + bind->buffer_length = sizeof(int64_t); + bind->buffer = bind_ts; + bind->length = &bind->buffer_length; + bind->is_null = NULL; + + ptr += bind->buffer_length; + } else { + + if (sourceRand) { + if ( -1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + &ptr, + NULL)) { + free(bindBuffer); + return -1; + } + } else { + char *restStr = stbInfo->sampleDataBuf + cursor; + int lengthOfRest = strlen(restStr); + + int index = 0; + for (index = 0; index < lengthOfRest; index ++) { + if (restStr[index] == ',') { + break; + } + } + + memset(bindBuffer, 0, g_args.len_of_binary); + strncpy(bindBuffer, restStr, index); + cursor += index + 1; // skip ',' too + + if ( -1 == prepareStmtBindArrayByType( + bind, + stbInfo->columns[i-1].dataType, + stbInfo->columns[i-1].dataLen, + &ptr, + bindBuffer)) { + free(bindBuffer); + return -1; + } + } + } + } + } else { + TAOS_BIND *tag; + + for (int t = 0; t < stbInfo->tagCount; t ++) { + tag = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * t)); + if ( -1 == prepareStmtBindArrayByType( + tag, + stbInfo->tags[t].dataType, + stbInfo->tags[t].dataLen, + &ptr, + NULL)) { + free(bindBuffer); + return -1; + } + } + } - bool tsRand; + free(bindBuffer); + return 0; +} + +static int32_t prepareStbStmt( + SSuperTable *stbInfo, + TAOS_STMT *stmt, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + int ret; + + bool sourceRand; if (0 == strncasecmp(stbInfo->dataSource, "rand", strlen("rand"))) { - tsRand = true; + sourceRand = true; } else { - tsRand = false; + sourceRand = false; // from sample data file } - uint32_t k; - for (k = 0; k < batch;) { - /* columnCount + 1 (ts) */ - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); + if (AUTO_CREATE_SUBTBL == stbInfo->autoCreateTable) { + char* tagsValBuf = NULL; - char *ptr = data; - TAOS_BIND *bind = (TAOS_BIND *)(bindArray + 0); + bool tagRand; + if (0 == stbInfo->tagSource) { + tagRand = true; + tagsValBuf = generateTagValuesForStb(stbInfo, tableSeq); + } else { + tagRand = false; + tagsValBuf = getTagValueFromTagSample( + stbInfo, + tableSeq % stbInfo->tagSampleCount); + } - int64_t *bind_ts; + if (NULL == tagsValBuf) { + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; + } - bind_ts = (int64_t *)ptr; - bind->buffer_type = 
TSDB_DATA_TYPE_TIMESTAMP; - if (tsRand) { - *bind_ts = startTime + getTSRandTail( - stbInfo->timeStampStep, k, - stbInfo->disorderRatio, - stbInfo->disorderRange); - } else { - *bind_ts = startTime + stbInfo->timeStampStep * k; + char *tagsArray = calloc(1, sizeof(TAOS_BIND) * stbInfo->tagCount); + if (NULL == tagsArray) { + tmfree(tagsValBuf); + errorPrint("%s() LN%d, tag buf failed to allocate memory\n", + __func__, __LINE__); + return -1; } - bind->buffer_length = sizeof(int64_t); - bind->buffer = bind_ts; - bind->length = &bind->buffer_length; - bind->is_null = NULL; - ptr += bind->buffer_length; + if (-1 == prepareStbStmtBind( + tagsArray, stbInfo, tagRand, -1, -1, false /* is tag */)) { + tmfree(tagsValBuf); + tmfree(tagsArray); + return -1; + } - for (int i = 0; i < stbInfo->columnCount; i ++) { - bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * (i + 1))); - if ( -1 == prepareStmtBindArrayByType( - bind, - stbInfo->columns[i].dataType, - stbInfo->columns[i].dataLen, - &ptr)) { - return -1; - } + ret = taos_stmt_set_tbname_tags(stmt, tableName, (TAOS_BIND *)tagsArray); + + tmfree(tagsValBuf); + tmfree(tagsArray); + + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname_tags() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } else { + ret = taos_stmt_set_tbname(stmt, tableName); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_set_tbname() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } + } + + char *bindArray = calloc(1, sizeof(TAOS_BIND) * (stbInfo->columnCount + 1)); + if (bindArray == NULL) { + errorPrint("%s() LN%d, Failed to allocate %d bind params\n", + __func__, __LINE__, (stbInfo->columnCount + 1)); + return -1; + } + + uint32_t k; + for (k = 0; k < batch;) { + /* columnCount + 1 (ts) */ + if (-1 == prepareStbStmtBind(bindArray, stbInfo, sourceRand, + startTime, k, true /* is column */)) { + free(bindArray); + return -1; + } + ret = taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; } - taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray); // if msg > 3MB, break - taos_stmt_add_batch(stmt); + ret = taos_stmt_add_batch(stmt); + if (0 != ret) { + errorPrint("%s() LN%d, stmt_add_batch() failed! 
reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + return -1; + } k++; recordFrom ++; + + if (!sourceRand) { + (*pSamplePos) ++; + } + if (recordFrom >= insertRows) { break; } @@ -5612,6 +6018,49 @@ static int32_t prepareStbStmt(SSuperTable *stbInfo, free(bindArray); return k; } + +static int32_t prepareStbStmtInterlace( + SSuperTable *stbInfo, + TAOS_STMT *stmt, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + return prepareStbStmt( + stbInfo, + stmt, + tableName, + tableSeq, + batch, + insertRows, 0, startTime, + pSamplePos); +} + +static int32_t prepareStbStmtProgressive( + SSuperTable *stbInfo, + TAOS_STMT *stmt, + char *tableName, + int64_t tableSeq, + uint32_t batch, + uint64_t insertRows, + uint64_t recordFrom, + int64_t startTime, + int64_t *pSamplePos) +{ + return prepareStbStmt( + stbInfo, + stmt, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, recordFrom, startTime, + pSamplePos); +} + #endif static int32_t generateStbProgressiveData( @@ -5623,29 +6072,29 @@ static int32_t generateStbProgressiveData( uint64_t recordFrom, int64_t startTime, int64_t *pSamplePos, int64_t *pRemainderBufLen) { - assert(buffer != NULL); - char *pstr = buffer; + assert(buffer != NULL); + char *pstr = buffer; - memset(buffer, 0, *pRemainderBufLen); + memset(buffer, 0, *pRemainderBufLen); - int64_t headLen = generateStbSQLHead( - superTblInfo, - tableName, tableSeq, dbName, - buffer, *pRemainderBufLen); + int64_t headLen = generateStbSQLHead( + superTblInfo, + tableName, tableSeq, dbName, + buffer, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; - int64_t dataLen; + int64_t dataLen; - return generateStbDataTail(superTblInfo, - g_args.num_of_RPR, pstr, *pRemainderBufLen, - insertRows, recordFrom, - startTime, - pSamplePos, &dataLen); + return generateStbDataTail(superTblInfo, + g_args.num_of_RPR, pstr, *pRemainderBufLen, + insertRows, recordFrom, + startTime, + pSamplePos, &dataLen); } static int32_t generateProgressiveDataWithoutStb( @@ -5656,1546 +6105,1596 @@ static int32_t generateProgressiveDataWithoutStb( uint64_t recordFrom, int64_t startTime, /*int64_t *pSamplePos, */ int64_t *pRemainderBufLen) { - assert(buffer != NULL); - char *pstr = buffer; + assert(buffer != NULL); + char *pstr = buffer; - memset(buffer, 0, *pRemainderBufLen); + memset(buffer, 0, *pRemainderBufLen); - int64_t headLen = generateSQLHeadWithoutStb( - tableName, pThreadInfo->db_name, - buffer, *pRemainderBufLen); + int64_t headLen = generateSQLHeadWithoutStb( + tableName, pThreadInfo->db_name, + buffer, *pRemainderBufLen); - if (headLen <= 0) { - return 0; - } - pstr += headLen; - *pRemainderBufLen -= headLen; + if (headLen <= 0) { + return 0; + } + pstr += headLen; + *pRemainderBufLen -= headLen; - int64_t dataLen; + int64_t dataLen; - return generateDataTailWithoutStb( - g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, - startTime, - /*pSamplePos, */&dataLen); + return generateDataTailWithoutStb( + g_args.num_of_RPR, pstr, *pRemainderBufLen, insertRows, recordFrom, + startTime, + /*pSamplePos, */&dataLen); } static void printStatPerThread(threadInfo *pThreadInfo) { fprintf(stderr, "====thread[%d] completed total inserted rows: %"PRIu64 ", total affected rows: %"PRIu64". 
%.2f records/second====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows, - (double)(pThreadInfo->totalAffectedRows / (pThreadInfo->totalDelay/1000.0))); + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows, + (pThreadInfo->totalDelay/1000.0)? + (double)(pThreadInfo->totalAffectedRows/(pThreadInfo->totalDelay/1000.0)): + FLT_MAX); } // sync write interlace data static void* syncWriteInterlace(threadInfo *pThreadInfo) { - debugPrint("[%d] %s() LN%d: ### interlace write\n", - pThreadInfo->threadID, __func__, __LINE__); + debugPrint("[%d] %s() LN%d: ### interlace write\n", + pThreadInfo->threadID, __func__, __LINE__); - int64_t insertRows; - uint32_t interlaceRows; - uint64_t maxSqlLen; - int64_t nTimeStampStep; - uint64_t insert_interval; + int64_t insertRows; + uint32_t interlaceRows; + uint64_t maxSqlLen; + int64_t nTimeStampStep; + uint64_t insert_interval; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - if (superTblInfo) { - insertRows = superTblInfo->insertRows; + if (superTblInfo) { + insertRows = superTblInfo->insertRows; - if ((superTblInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } + maxSqlLen = superTblInfo->maxSqlLen; + nTimeStampStep = superTblInfo->timeStampStep; + insert_interval = superTblInfo->insertInterval; } else { - interlaceRows = superTblInfo->interlaceRows; - } - maxSqlLen = superTblInfo->maxSqlLen; - nTimeStampStep = superTblInfo->timeStampStep; - insert_interval = superTblInfo->insertInterval; - } else { - insertRows = g_args.num_of_DPT; - interlaceRows = g_args.interlace_rows; - maxSqlLen = g_args.max_sql_len; - nTimeStampStep = DEFAULT_TIMESTAMP_STEP; - insert_interval = g_args.insert_interval; - } - - debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, - pThreadInfo->start_table_from, - pThreadInfo->ntables, insertRows); - - if (interlaceRows > insertRows) - interlaceRows = insertRows; - - if (interlaceRows > g_args.num_of_RPR) - interlaceRows = g_args.num_of_RPR; - - uint32_t batchPerTbl = interlaceRows; - uint32_t batchPerTblTimes; - - if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { - batchPerTblTimes = - g_args.num_of_RPR / interlaceRows; - } else { - batchPerTblTimes = 1; - } - - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", - __func__, __LINE__, maxSqlLen, strerror(errno)); - return NULL; - } + insertRows = g_args.num_of_DPT; + interlaceRows = g_args.interlace_rows; + maxSqlLen = g_args.max_sql_len; + nTimeStampStep = g_args.timestamp_step; + insert_interval = g_args.insert_interval; + } - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; + debugPrint("[%d] %s() LN%d: start_table_from=%"PRIu64" ntables=%"PRId64" insertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, + pThreadInfo->start_table_from, + pThreadInfo->ntables, insertRows); - uint64_t st = 0; - uint64_t et = UINT64_MAX; + if (interlaceRows > insertRows) + interlaceRows = insertRows; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - 
uint64_t endTs; + if (interlaceRows > g_args.num_of_RPR) + interlaceRows = g_args.num_of_RPR; - uint64_t tableSeq = pThreadInfo->start_table_from; - int64_t startTime = pThreadInfo->start_time; + uint32_t batchPerTbl = interlaceRows; + uint32_t batchPerTblTimes; - uint64_t generatedRecPerTbl = 0; - bool flagSleep = true; - uint64_t sleepTimeTotal = 0; + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { + batchPerTblTimes = + g_args.num_of_RPR / interlaceRows; + } else { + batchPerTblTimes = 1; + } - while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { - if ((flagSleep) && (insert_interval)) { - st = taosGetTimestampMs(); - flagSleep = false; + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint( "%s() LN%d, Failed to alloc %"PRIu64" Bytes, reason:%s\n", + __func__, __LINE__, maxSqlLen, strerror(errno)); + return NULL; } - // generate data - memset(pThreadInfo->buffer, 0, maxSqlLen); - uint64_t remainderBufLen = maxSqlLen; - char *pstr = pThreadInfo->buffer; + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; - int len = snprintf(pstr, - strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - pstr += len; - remainderBufLen -= len; + uint64_t st = 0; + uint64_t et = UINT64_MAX; - uint32_t recOfBatch = 0; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; - for (uint32_t i = 0; i < batchPerTblTimes; i ++) { - char tableName[TSDB_TABLE_NAME_LEN]; + uint64_t tableSeq = pThreadInfo->start_table_from; + int64_t startTime = pThreadInfo->start_time; - getTableName(tableName, pThreadInfo, tableSeq); - if (0 == strlen(tableName)) { - errorPrint("[%d] %s() LN%d, getTableName return null\n", - pThreadInfo->threadID, __func__, __LINE__); - free(pThreadInfo->buffer); - return NULL; - } + uint64_t generatedRecPerTbl = 0; + bool flagSleep = true; + uint64_t sleepTimeTotal = 0; + + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { + if ((flagSleep) && (insert_interval)) { + st = taosGetTimestampMs(); + flagSleep = false; + } + // generate data + memset(pThreadInfo->buffer, 0, maxSqlLen); + uint64_t remainderBufLen = maxSqlLen; + + char *pstr = pThreadInfo->buffer; + + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); + pstr += len; + remainderBufLen -= len; + + uint32_t recOfBatch = 0; + + for (uint64_t i = 0; i < batchPerTblTimes; i ++) { + char tableName[TSDB_TABLE_NAME_LEN]; - uint64_t oldRemainderLen = remainderBufLen; + getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } + + uint64_t oldRemainderLen = remainderBufLen; - int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + int32_t generated; + if (superTblInfo) { + if (superTblInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmt(superTblInfo, - pThreadInfo->stmt, - tableName, - batchPerTbl, - insertRows, i, - startTime, - pThreadInfo->buffer); + generated = prepareStbStmtInterlace( + superTblInfo, + pThreadInfo->stmt, + tableName, + tableSeq, + batchPerTbl, + insertRows, i, + startTime, + &(pThreadInfo->samplePos)); #else - generated = -1; + generated = -1; #endif - } else { - generated = generateStbInterlaceData( - superTblInfo, - tableName, batchPerTbl, i, - batchPerTblTimes, - tableSeq, - pThreadInfo, pstr, 
- insertRows, - startTime, - &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { - debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, - tableName, batchPerTbl, startTime); + } else { + generated = generateStbInterlaceData( + superTblInfo, + tableName, batchPerTbl, i, + batchPerTblTimes, + tableSeq, + pThreadInfo, pstr, + insertRows, + startTime, + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { + debugPrint("[%d] %s() LN%d, tableName:%s, batch:%d startTime:%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, + tableName, batchPerTbl, startTime); #if STMT_IFACE_ENABLED == 1 - generated = prepareStmtWithoutStb( - pThreadInfo->stmt, tableName, - batchPerTbl, - insertRows, i, - startTime); + generated = prepareStmtWithoutStb( + pThreadInfo->stmt, tableName, + batchPerTbl, + insertRows, i, + startTime); #else - generated = -1; + generated = -1; #endif - } else { - generated = generateInterlaceDataWithoutStb( - tableName, batchPerTbl, - tableSeq, - pThreadInfo->db_name, pstr, - insertRows, - startTime, - &remainderBufLen); - } - } - - debugPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - if (generated < 0) { - errorPrint("[%d] %s() LN%d, generated records is %d\n", - pThreadInfo->threadID, __func__, __LINE__, generated); - goto free_of_interlace; - } else if (generated == 0) { - break; - } - - tableSeq ++; - recOfBatch += batchPerTbl; - - pstr += (oldRemainderLen - remainderBufLen); - pThreadInfo->totalInsertRows += batchPerTbl; - - verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", - pThreadInfo->threadID, __func__, __LINE__, - batchPerTbl, recOfBatch); - - if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { - // turn to first table - tableSeq = pThreadInfo->start_table_from; - generatedRecPerTbl += batchPerTbl; - - startTime = pThreadInfo->start_time - + generatedRecPerTbl * nTimeStampStep; - - flagSleep = true; - if (generatedRecPerTbl >= insertRows) - break; - - int64_t remainRows = insertRows - generatedRecPerTbl; - if ((remainRows > 0) && (batchPerTbl > remainRows)) - batchPerTbl = remainRows; - - if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + } else { + generated = generateInterlaceDataWithoutStb( + tableName, batchPerTbl, + tableSeq, + pThreadInfo->db_name, pstr, + insertRows, + startTime, + &remainderBufLen); + } + } + + debugPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + if (generated < 0) { + errorPrint("[%d] %s() LN%d, generated records is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_of_interlace; + } else if (generated == 0) { break; - } + } - verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", - pThreadInfo->threadID, __func__, __LINE__, - generatedRecPerTbl, insertRows); + tableSeq ++; + recOfBatch += batchPerTbl; - if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) - break; - } + pstr += (oldRemainderLen - remainderBufLen); + pThreadInfo->totalInsertRows += batchPerTbl; - verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", - pThreadInfo->threadID, __func__, __LINE__, recOfBatch, - pThreadInfo->totalInsertRows); - verbosePrint("[%d] %s() LN%d, buffer=%s\n", - pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); + verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", + pThreadInfo->threadID, 
__func__, __LINE__, + batchPerTbl, recOfBatch); - startTs = taosGetTimestampMs(); + if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { + // turn to first table + tableSeq = pThreadInfo->start_table_from; + generatedRecPerTbl += batchPerTbl; - if (recOfBatch == 0) { - errorPrint("[%d] %s() LN%d try inserting records of batch is %d\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch); - errorPrint("%s\n", "\tPlease check if the batch or the buffer length is proper value!\n"); - goto free_of_interlace; - } - int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + startTime = pThreadInfo->start_time + + generatedRecPerTbl * nTimeStampStep; - endTs = taosGetTimestampMs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n", - __func__, __LINE__, delay); - verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); + flagSleep = true; + if (generatedRecPerTbl >= insertRows) + break; - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; + int64_t remainRows = insertRows - generatedRecPerTbl; + if ((remainRows > 0) && (batchPerTbl > remainRows)) + batchPerTbl = remainRows; - if (recOfBatch != affectedRows) { - errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", - pThreadInfo->threadID, __func__, __LINE__, - recOfBatch, affectedRows, pThreadInfo->buffer); - goto free_of_interlace; - } + if (pThreadInfo->ntables * batchPerTbl < g_args.num_of_RPR) + break; + } + + verbosePrint("[%d] %s() LN%d generatedRecPerTbl=%"PRId64" insertRows=%"PRId64"\n", + pThreadInfo->threadID, __func__, __LINE__, + generatedRecPerTbl, insertRows); - pThreadInfo->totalAffectedRows += affectedRows; + if ((g_args.num_of_RPR - recOfBatch) < batchPerTbl) + break; + } + + verbosePrint("[%d] %s() LN%d recOfBatch=%d totalInsertRows=%"PRIu64"\n", + pThreadInfo->threadID, __func__, __LINE__, recOfBatch, + pThreadInfo->totalInsertRows); + verbosePrint("[%d] %s() LN%d, buffer=%s\n", + pThreadInfo->threadID, __func__, __LINE__, pThreadInfo->buffer); + + startTs = taosGetTimestampMs(); + + if (recOfBatch == 0) { + errorPrint("[%d] %s() LN%d Failed to insert records of batch %d\n", + pThreadInfo->threadID, __func__, __LINE__, + batchPerTbl); + if (batchPerTbl > 0) { + errorPrint("\tIf the batch is %d, the length of the SQL to insert a row must be less then %"PRId64"\n", + batchPerTbl, maxSqlLen / batchPerTbl); + } + errorPrint("\tPlease check if the buffer length(%"PRId64") or batch(%d) is set with proper value!\n", + maxSqlLen, batchPerTbl); + goto free_of_interlace; + } + int64_t affectedRows = execInsert(pThreadInfo, recOfBatch); + + endTs = taosGetTimestampMs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %"PRIu64"ms\n", + __func__, __LINE__, delay); + verbosePrint("[%d] %s() LN%d affectedRows=%"PRId64"\n", + pThreadInfo->threadID, + __func__, __LINE__, affectedRows); + + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; + + if (recOfBatch != affectedRows) { + errorPrint("[%d] %s() LN%d execInsert insert %d, affected rows: %"PRId64"\n%s\n", + pThreadInfo->threadID, __func__, __LINE__, + recOfBatch, affectedRows, 
pThreadInfo->buffer); + goto free_of_interlace; + } + + pThreadInfo->totalAffectedRows += affectedRows; - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRIu64 ", affected rows: %"PRIu64 "\n", pThreadInfo->threadID, pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } + lastPrintTime = currentPrintTime; + } - if ((insert_interval) && flagSleep) { - et = taosGetTimestampMs(); + if ((insert_interval) && flagSleep) { + et = taosGetTimestampMs(); - if (insert_interval > (et - st) ) { - uint64_t sleepTime = insert_interval - (et -st); - performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", - __func__, __LINE__, sleepTime); - taosMsleep(sleepTime); // ms - sleepTimeTotal += insert_interval; - } + if (insert_interval > (et - st) ) { + uint64_t sleepTime = insert_interval - (et -st); + performancePrint("%s() LN%d sleep: %"PRId64" ms for insert interval\n", + __func__, __LINE__, sleepTime); + taosMsleep(sleepTime); // ms + sleepTimeTotal += insert_interval; + } + } } - } free_of_interlace: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; } // sync insertion progressive data static void* syncWriteProgressive(threadInfo *pThreadInfo) { - debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); + debugPrint("%s() LN%d: ### progressive write\n", __func__, __LINE__); - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; - int64_t timeStampStep = - superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int64_t insertRows = + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + uint64_t maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int64_t timeStampStep = + superTblInfo?superTblInfo->timeStampStep:g_args.timestamp_step; + int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; - verbosePrint("%s() LN%d insertRows=%"PRId64"\n", + verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); - pThreadInfo->buffer = calloc(maxSqlLen, 1); - if (NULL == pThreadInfo->buffer) { - errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", - maxSqlLen, - strerror(errno)); - return NULL; - } + pThreadInfo->buffer = calloc(maxSqlLen, 1); + if (NULL == pThreadInfo->buffer) { + errorPrint( "Failed to alloc %"PRIu64" Bytes, reason:%s\n", + maxSqlLen, + strerror(errno)); + return NULL; + } - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); - uint64_t endTs; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); + uint64_t endTs; - pThreadInfo->totalInsertRows = 0; - pThreadInfo->totalAffectedRows = 0; + pThreadInfo->totalInsertRows = 0; + pThreadInfo->totalAffectedRows = 0; - pThreadInfo->samplePos = 0; + pThreadInfo->samplePos = 0; - for (uint64_t tableSeq = pThreadInfo->start_table_from; - tableSeq <= pThreadInfo->end_table_to; - tableSeq ++) { - int64_t start_time = pThreadInfo->start_time; + for (uint64_t tableSeq = pThreadInfo->start_table_from; + tableSeq <= pThreadInfo->end_table_to; + 
tableSeq ++) { + int64_t start_time = pThreadInfo->start_time; - for (uint64_t i = 0; i < insertRows;) { - char tableName[TSDB_TABLE_NAME_LEN]; - getTableName(tableName, pThreadInfo, tableSeq); - verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", - __func__, __LINE__, - pThreadInfo->threadID, tableSeq, tableName); + for (uint64_t i = 0; i < insertRows;) { + char tableName[TSDB_TABLE_NAME_LEN]; + getTableName(tableName, pThreadInfo, tableSeq); + verbosePrint("%s() LN%d: tid=%d seq=%"PRId64" tableName=%s\n", + __func__, __LINE__, + pThreadInfo->threadID, tableSeq, tableName); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + free(pThreadInfo->buffer); + return NULL; + } - int64_t remainderBufLen = maxSqlLen; - char *pstr = pThreadInfo->buffer; + int64_t remainderBufLen = maxSqlLen; + char *pstr = pThreadInfo->buffer; - int len = snprintf(pstr, - strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); + int len = snprintf(pstr, + strlen(STR_INSERT_INTO) + 1, "%s", STR_INSERT_INTO); - pstr += len; - remainderBufLen -= len; + pstr += len; + remainderBufLen -= len; - int32_t generated; - if (superTblInfo) { - if (superTblInfo->iface == STMT_IFACE) { + int32_t generated; + if (superTblInfo) { + if (superTblInfo->iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStbStmt( - superTblInfo, - pThreadInfo->stmt, - tableName, - g_args.num_of_RPR, - insertRows, i, start_time, pstr); + generated = prepareStbStmtProgressive( + superTblInfo, + pThreadInfo->stmt, + tableName, + tableSeq, + g_args.num_of_RPR, + insertRows, i, start_time, + &(pThreadInfo->samplePos)); #else - generated = -1; + generated = -1; #endif - } else { - generated = generateStbProgressiveData( - superTblInfo, - tableName, tableSeq, pThreadInfo->db_name, pstr, - insertRows, i, start_time, - &(pThreadInfo->samplePos), - &remainderBufLen); - } - } else { - if (g_args.iface == STMT_IFACE) { + } else { + generated = generateStbProgressiveData( + superTblInfo, + tableName, tableSeq, pThreadInfo->db_name, pstr, + insertRows, i, start_time, + &(pThreadInfo->samplePos), + &remainderBufLen); + } + } else { + if (g_args.iface == STMT_IFACE) { #if STMT_IFACE_ENABLED == 1 - generated = prepareStmtWithoutStb( - pThreadInfo->stmt, - tableName, - g_args.num_of_RPR, - insertRows, i, - start_time); + generated = prepareStmtWithoutStb( + pThreadInfo->stmt, + tableName, + g_args.num_of_RPR, + insertRows, i, + start_time); #else - generated = -1; + generated = -1; #endif - } else { - generated = generateProgressiveDataWithoutStb( - tableName, - /* tableSeq, */ - pThreadInfo, pstr, insertRows, - i, start_time, - /* &(pThreadInfo->samplePos), */ - &remainderBufLen); - } - } - if (generated > 0) - i += generated; - else - goto free_of_progressive; - - start_time += generated * timeStampStep; - pThreadInfo->totalInsertRows += generated; - - startTs = taosGetTimestampMs(); - - int32_t affectedRows = execInsert(pThreadInfo, generated); - - endTs = taosGetTimestampMs(); - uint64_t delay = endTs - startTs; - performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", - __func__, __LINE__, delay); - verbosePrint("[%d] %s() LN%d affectedRows=%d\n", - pThreadInfo->threadID, - __func__, __LINE__, affectedRows); + } else { + generated = generateProgressiveDataWithoutStb( + tableName, + /* tableSeq, */ + pThreadInfo, pstr, insertRows, + i, start_time, + /* &(pThreadInfo->samplePos), */ + &remainderBufLen); + } + } + if (generated > 0) + i += 
generated; + else + goto free_of_progressive; - if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; - if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; - pThreadInfo->cntDelay++; - pThreadInfo->totalDelay += delay; + start_time += generated * timeStampStep; + pThreadInfo->totalInsertRows += generated; - if (affectedRows < 0) { - errorPrint("%s() LN%d, affected rows: %d\n", - __func__, __LINE__, affectedRows); - goto free_of_progressive; - } + startTs = taosGetTimestampMs(); - pThreadInfo->totalAffectedRows += affectedRows; + int32_t affectedRows = execInsert(pThreadInfo, generated); - int64_t currentPrintTime = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + endTs = taosGetTimestampMs(); + uint64_t delay = endTs - startTs; + performancePrint("%s() LN%d, insert execution time is %"PRId64"ms\n", + __func__, __LINE__, delay); + verbosePrint("[%d] %s() LN%d affectedRows=%d\n", pThreadInfo->threadID, - pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - lastPrintTime = currentPrintTime; - } + __func__, __LINE__, affectedRows); - if (i >= insertRows) - break; - } // num_of_DPT + if (delay > pThreadInfo->maxDelay) pThreadInfo->maxDelay = delay; + if (delay < pThreadInfo->minDelay) pThreadInfo->minDelay = delay; + pThreadInfo->cntDelay++; + pThreadInfo->totalDelay += delay; - if ((g_args.verbose_print) && - (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) && - (0 == strncasecmp( - superTblInfo->dataSource, "sample", strlen("sample")))) { - verbosePrint("%s() LN%d samplePos=%"PRId64"\n", - __func__, __LINE__, pThreadInfo->samplePos); - } - } // tableSeq + if (affectedRows < 0) { + errorPrint("%s() LN%d, affected rows: %d\n", + __func__, __LINE__, affectedRows); + goto free_of_progressive; + } + + pThreadInfo->totalAffectedRows += affectedRows; + + int64_t currentPrintTime = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently inserted rows: %"PRId64 ", affected rows: %"PRId64 "\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + lastPrintTime = currentPrintTime; + } + + if (i >= insertRows) + break; + } // num_of_DPT + + if ((g_args.verbose_print) && + (tableSeq == pThreadInfo->ntables - 1) && (superTblInfo) + && (0 == strncasecmp( + superTblInfo->dataSource, + "sample", strlen("sample")))) { + verbosePrint("%s() LN%d samplePos=%"PRId64"\n", + __func__, __LINE__, pThreadInfo->samplePos); + } + } // tableSeq free_of_progressive: - tmfree(pThreadInfo->buffer); - printStatPerThread(pThreadInfo); - return NULL; + tmfree(pThreadInfo->buffer); + printStatPerThread(pThreadInfo); + return NULL; } static void* syncWrite(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + setThreadName("syncWrite"); + + uint32_t interlaceRows; - uint32_t interlaceRows; + if (superTblInfo) { + if ((superTblInfo->interlaceRows == 0) + && (g_args.interlace_rows > 0)) { + interlaceRows = g_args.interlace_rows; + } else { + interlaceRows = superTblInfo->interlaceRows; + } + } else { + interlaceRows = g_args.interlace_rows; + } - if (superTblInfo) { - if ((superTblInfo->interlaceRows == 0) - && (g_args.interlace_rows > 0)) { - interlaceRows = g_args.interlace_rows; + if (interlaceRows > 
0) { + // interlace mode + return syncWriteInterlace(pThreadInfo); } else { - interlaceRows = superTblInfo->interlaceRows; - } - } else { - interlaceRows = g_args.interlace_rows; - } - - if (interlaceRows > 0) { - // interlace mode - return syncWriteInterlace(pThreadInfo); - } else { - // progressive mode - return syncWriteProgressive(pThreadInfo); - } + // progressive mode + return syncWriteProgressive(pThreadInfo); + } } static void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* pThreadInfo = (threadInfo*)param; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - - int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - if (insert_interval) { - pThreadInfo->et = taosGetTimestampMs(); - if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { - taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms - } - } - - char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); - char data[MAX_DATA_SIZE]; - char *pstr = buffer; - pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", - pThreadInfo->db_name, pThreadInfo->tb_prefix, - pThreadInfo->start_table_from); -// if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { - if (pThreadInfo->counter >= g_args.num_of_RPR) { - pThreadInfo->start_table_from++; - pThreadInfo->counter = 0; - } - if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { - tsem_post(&pThreadInfo->lock_sem); - free(buffer); - taos_free_result(res); - return; - } - - for (int i = 0; i < g_args.num_of_RPR; i++) { - int rand_num = taosRandom() % 100; - if (0 != pThreadInfo->superTblInfo->disorderRatio - && rand_num < pThreadInfo->superTblInfo->disorderRatio) { - int64_t d = pThreadInfo->lastTs - - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->superTblInfo, data, d); - } else { - generateStbRowData(pThreadInfo->superTblInfo, - data, pThreadInfo->lastTs += 1000); + threadInfo* pThreadInfo = (threadInfo*)param; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + + int insert_interval = + superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + if (insert_interval) { + pThreadInfo->et = taosGetTimestampMs(); + if ((pThreadInfo->et - pThreadInfo->st) < insert_interval) { + taosMsleep(insert_interval - (pThreadInfo->et - pThreadInfo->st)); // ms + } + } + + char *buffer = calloc(1, pThreadInfo->superTblInfo->maxSqlLen); + char data[MAX_DATA_SIZE]; + char *pstr = buffer; + pstr += sprintf(pstr, "insert into %s.%s%"PRId64" values", + pThreadInfo->db_name, pThreadInfo->tb_prefix, + pThreadInfo->start_table_from); + // if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + if (pThreadInfo->counter >= g_args.num_of_RPR) { + pThreadInfo->start_table_from++; + pThreadInfo->counter = 0; + } + if (pThreadInfo->start_table_from > pThreadInfo->end_table_to) { + tsem_post(&pThreadInfo->lock_sem); + free(buffer); + taos_free_result(res); + return; } - pstr += sprintf(pstr, "%s", data); - pThreadInfo->counter++; - if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { - break; + for (int i = 0; i < g_args.num_of_RPR; i++) { + int rand_num = taosRandom() % 100; + if (0 != pThreadInfo->superTblInfo->disorderRatio + && rand_num < pThreadInfo->superTblInfo->disorderRatio) { + int64_t d = pThreadInfo->lastTs + - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); + generateStbRowData(pThreadInfo->superTblInfo, data, d); + } else { + generateStbRowData(pThreadInfo->superTblInfo, + 
data, pThreadInfo->lastTs += 1000); + } + pstr += sprintf(pstr, "%s", data); + pThreadInfo->counter++; + + if (pThreadInfo->counter >= pThreadInfo->superTblInfo->insertRows) { + break; + } } - } - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); - free(buffer); + if (insert_interval) { + pThreadInfo->st = taosGetTimestampMs(); + } + taos_query_a(pThreadInfo->taos, buffer, callBack, pThreadInfo); + free(buffer); - taos_free_result(res); + taos_free_result(res); } static void *asyncWrite(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + threadInfo *pThreadInfo = (threadInfo *)sarg; + SSuperTable* superTblInfo = pThreadInfo->superTblInfo; - pThreadInfo->st = 0; - pThreadInfo->et = 0; - pThreadInfo->lastTs = pThreadInfo->start_time; + setThreadName("asyncWrite"); - int insert_interval = - superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; - if (insert_interval) { - pThreadInfo->st = taosGetTimestampMs(); - } - taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); + pThreadInfo->st = 0; + pThreadInfo->et = 0; + pThreadInfo->lastTs = pThreadInfo->start_time; + + int insert_interval = + superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + if (insert_interval) { + pThreadInfo->st = taosGetTimestampMs(); + } + taos_query_a(pThreadInfo->taos, "show databases", callBack, pThreadInfo); - tsem_wait(&(pThreadInfo->lock_sem)); + tsem_wait(&(pThreadInfo->lock_sem)); - return NULL; + return NULL; } static int convertHostToServAddr(char *host, uint16_t port, struct sockaddr_in *serv_addr) { - uint16_t rest_port = port + TSDB_PORT_HTTP; - struct hostent *server = gethostbyname(host); - if ((server == NULL) || (server->h_addr == NULL)) { - errorPrint("%s", "ERROR, no such host"); - return -1; - } + uint16_t rest_port = port + TSDB_PORT_HTTP; + struct hostent *server = gethostbyname(host); + if ((server == NULL) || (server->h_addr == NULL)) { + errorPrint("%s", "ERROR, no such host"); + return -1; + } - debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", + debugPrint("h_name: %s\nh_addr=%p\nh_addretype: %s\nh_length: %d\n", server->h_name, server->h_addr, (server->h_addrtype == AF_INET)?"ipv4":"ipv6", server->h_length); - memset(serv_addr, 0, sizeof(struct sockaddr_in)); - serv_addr->sin_family = AF_INET; - serv_addr->sin_port = htons(rest_port); + memset(serv_addr, 0, sizeof(struct sockaddr_in)); + serv_addr->sin_family = AF_INET; + serv_addr->sin_port = htons(rest_port); #ifdef WINDOWS - serv_addr->sin_addr.s_addr = inet_addr(host); + serv_addr->sin_addr.s_addr = inet_addr(host); #else - memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); + memcpy(&(serv_addr->sin_addr.s_addr), server->h_addr, server->h_length); #endif - return 0; + return 0; } static void startMultiThreadInsertData(int threads, char* db_name, - char* precision,SSuperTable* superTblInfo) { - - int32_t timePrec = TSDB_TIME_PRECISION_MILLI; - if (0 != precision[0]) { - if (0 == strncasecmp(precision, "ms", 2)) { - timePrec = TSDB_TIME_PRECISION_MILLI; - } else if (0 == strncasecmp(precision, "us", 2)) { - timePrec = TSDB_TIME_PRECISION_MICRO; - } else if (0 == strncasecmp(precision, "ns", 2)) { - timePrec = TSDB_TIME_PRECISION_NANO; - } else { - errorPrint("Not support precision: %s\n", precision); - exit(-1); + char* precision, SSuperTable* superTblInfo) { + + int32_t timePrec = 
TSDB_TIME_PRECISION_MILLI; + if (0 != precision[0]) { + if (0 == strncasecmp(precision, "ms", 2)) { + timePrec = TSDB_TIME_PRECISION_MILLI; + } else if (0 == strncasecmp(precision, "us", 2)) { + timePrec = TSDB_TIME_PRECISION_MICRO; +#if NANO_SECOND_ENABLED == 1 + } else if (0 == strncasecmp(precision, "ns", 2)) { + timePrec = TSDB_TIME_PRECISION_NANO; +#endif + } else { + errorPrint("Not support precision: %s\n", precision); + exit(-1); + } } - } - int64_t start_time; - if (superTblInfo) { - if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { - start_time = taosGetTimestamp(timePrec); + int64_t start_time; + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + start_time = taosGetTimestamp(timePrec); + } else { + if (TSDB_CODE_SUCCESS != taosParseTime( + superTblInfo->startTimestamp, + &start_time, + strlen(superTblInfo->startTimestamp), + timePrec, 0)) { + ERROR_EXIT("failed to parse time!\n"); + } + } } else { - if (TSDB_CODE_SUCCESS != taosParseTime( - superTblInfo->startTimestamp, - &start_time, - strlen(superTblInfo->startTimestamp), - timePrec, 0)) { - ERROR_EXIT("failed to parse time!\n"); - } - } - } else { - start_time = 1500000000000; - } - - int64_t start = taosGetTimestampMs(); - - // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); - exit(-1); - } - } - - // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); - exit(-1); - } - } - - TAOS* taos0 = taos_connect( - g_Dbs.host, g_Dbs.user, - g_Dbs.password, db_name, g_Dbs.port); - if (NULL == taos0) { - errorPrint("%s() LN%d, connect to server fail , reason: %s\n", - __func__, __LINE__, taos_errstr(NULL)); - exit(-1); - } + start_time = 1500000000000; + } + debugPrint("%s() LN%d, start_time= %"PRId64"\n", + __func__, __LINE__, start_time); - int64_t ntables = 0; - uint64_t tableFrom; + int64_t start = taosGetTimestampMs(); - if (superTblInfo) { - int64_t limit; - uint64_t offset; + // read sample data from file first + if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + "sample", strlen("sample")))) { + if (0 != prepareSampleDataForSTable(superTblInfo)) { + errorPrint("%s() LN%d, prepare sample data for stable failed!\n", + __func__, __LINE__); + exit(-1); + } + } - if ((NULL != g_args.sqlFile) && (superTblInfo->childTblExists == TBL_NO_EXISTS) && - ((superTblInfo->childTblOffset != 0) || (superTblInfo->childTblLimit >= 0))) { - printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); + TAOS* taos0 = taos_connect( + g_Dbs.host, g_Dbs.user, + g_Dbs.password, db_name, g_Dbs.port); + if (NULL == taos0) { + errorPrint("%s() LN%d, connect to server fail , reason: %s\n", + __func__, __LINE__, taos_errstr(NULL)); + exit(-1); } - if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { - if ((superTblInfo->childTblLimit < 0) - || ((superTblInfo->childTblOffset + superTblInfo->childTblLimit) - > (superTblInfo->childTblCount))) { - superTblInfo->childTblLimit = - superTblInfo->childTblCount - superTblInfo->childTblOffset; - } + int64_t ntables = 0; + uint64_t tableFrom; + + if (superTblInfo) 
{ + int64_t limit; + uint64_t offset; + + if ((NULL != g_args.sqlFile) + && (superTblInfo->childTblExists == TBL_NO_EXISTS) + && ((superTblInfo->childTblOffset != 0) + || (superTblInfo->childTblLimit >= 0))) { + printf("WARNING: offset and limit will not be used since the child tables not exists!\n"); + } + + if (superTblInfo->childTblExists == TBL_ALREADY_EXISTS) { + if ((superTblInfo->childTblLimit < 0) + || ((superTblInfo->childTblOffset + + superTblInfo->childTblLimit) + > (superTblInfo->childTblCount))) { + superTblInfo->childTblLimit = + superTblInfo->childTblCount - superTblInfo->childTblOffset; + } + + offset = superTblInfo->childTblOffset; + limit = superTblInfo->childTblLimit; + } else { + limit = superTblInfo->childTblCount; + offset = 0; + } + + ntables = limit; + tableFrom = offset; + + if ((superTblInfo->childTblExists != TBL_NO_EXISTS) + && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) + > superTblInfo->childTblCount)) { + printf("WARNING: specified offset + limit > child table count!\n"); + prompt(); + } + + if ((superTblInfo->childTblExists != TBL_NO_EXISTS) + && (0 == superTblInfo->childTblLimit)) { + printf("WARNING: specified limit = 0, which cannot find table name to insert or query! \n"); + prompt(); + } + + superTblInfo->childTblName = (char*)calloc(1, + limit * TSDB_TABLE_NAME_LEN); + if (superTblInfo->childTblName == NULL) { + errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); + taos_close(taos0); + exit(-1); + } - offset = superTblInfo->childTblOffset; - limit = superTblInfo->childTblLimit; + int64_t childTblCount; + getChildNameOfSuperTableWithLimitAndOffset( + taos0, + db_name, superTblInfo->sTblName, + &superTblInfo->childTblName, &childTblCount, + limit, + offset); } else { - limit = superTblInfo->childTblCount; - offset = 0; + ntables = g_args.num_of_tables; + tableFrom = 0; } - ntables = limit; - tableFrom = offset; + taos_close(taos0); - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && ((superTblInfo->childTblOffset + superTblInfo->childTblLimit ) - > superTblInfo->childTblCount)) { - printf("WARNING: specified offset + limit > child table count!\n"); - prompt(); + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; } - if ((superTblInfo->childTblExists != TBL_NO_EXISTS) - && (0 == superTblInfo->childTblLimit)) { - printf("WARNING: specified limit = 0, which cannot find table name to insert or query! 
\n"); - prompt(); + int64_t b = 0; + if (threads != 0) { + b = ntables % threads; } - superTblInfo->childTblName = (char*)calloc(1, - limit * TSDB_TABLE_NAME_LEN); - if (superTblInfo->childTblName == NULL) { - errorPrint("%s() LN%d, alloc memory failed!\n", __func__, __LINE__); - taos_close(taos0); - exit(-1); + if ((superTblInfo) + && (superTblInfo->iface == REST_IFACE)) { + if (convertHostToServAddr( + g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { + exit(-1); + } } - int64_t childTblCount; - getChildNameOfSuperTableWithLimitAndOffset( - taos0, - db_name, superTblInfo->sTblName, - &superTblInfo->childTblName, &childTblCount, - limit, - offset); - } else { - ntables = g_args.num_of_tables; - tableFrom = 0; - } - - taos_close(taos0); + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + assert(pids != NULL); - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } + threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); + assert(infos != NULL); - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; - } + memset(pids, 0, threads * sizeof(pthread_t)); + memset(infos, 0, threads * sizeof(threadInfo)); - if ((superTblInfo) - && (superTblInfo->iface == REST_IFACE)) { - if (convertHostToServAddr( - g_Dbs.host, g_Dbs.port, &(g_Dbs.serv_addr)) != 0) { - exit(-1); - } - } - - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - assert(pids != NULL); - - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - assert(infos != NULL); - - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE); - pThreadInfo->time_precision = timePrec; - pThreadInfo->superTblInfo = superTblInfo; - - pThreadInfo->start_time = start_time; - pThreadInfo->minDelay = UINT64_MAX; - - if ((NULL == superTblInfo) || - (superTblInfo->iface != REST_IFACE)) { - //t_info->taos = taos; - pThreadInfo->taos = taos_connect( - g_Dbs.host, g_Dbs.user, - g_Dbs.password, db_name, g_Dbs.port); - if (NULL == pThreadInfo->taos) { - errorPrint( - "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", - __func__, __LINE__, - taos_errstr(NULL)); - free(infos); - exit(-1); - } + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; + pThreadInfo->threadID = i; + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); + pThreadInfo->time_precision = timePrec; + pThreadInfo->superTblInfo = superTblInfo; + + pThreadInfo->start_time = start_time; + pThreadInfo->minDelay = UINT64_MAX; + + if ((NULL == superTblInfo) || + (superTblInfo->iface != REST_IFACE)) { + //t_info->taos = taos; + pThreadInfo->taos = taos_connect( + g_Dbs.host, g_Dbs.user, + g_Dbs.password, db_name, g_Dbs.port); + if (NULL == pThreadInfo->taos) { + errorPrint( + "%s() LN%d, connect to server fail from insert sub thread, reason: %s\n", + __func__, __LINE__, + taos_errstr(NULL)); + free(infos); + exit(-1); + } #if STMT_IFACE_ENABLED == 1 - if ((g_args.iface == STMT_IFACE) - || ((superTblInfo) && (superTblInfo->iface == STMT_IFACE))) { - - int columnCount; - if (superTblInfo) { - columnCount = superTblInfo->columnCount; - } else { - columnCount = g_args.num_of_CPR; - } - - pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); - if (NULL == pThreadInfo->stmt) { - errorPrint( - "%s() LN%d, failed init stmt, reason: %s\n", - __func__, __LINE__, - taos_errstr(NULL)); - 
free(pids); - free(infos); - exit(-1); - } + if ((g_args.iface == STMT_IFACE) + || ((superTblInfo) + && (superTblInfo->iface == STMT_IFACE))) { + + int columnCount; + if (superTblInfo) { + columnCount = superTblInfo->columnCount; + } else { + columnCount = g_args.num_of_CPR; + } + + pThreadInfo->stmt = taos_stmt_init(pThreadInfo->taos); + if (NULL == pThreadInfo->stmt) { + errorPrint( + "%s() LN%d, failed init stmt, reason: %s\n", + __func__, __LINE__, + taos_errstr(NULL)); + free(pids); + free(infos); + exit(-1); + } + + char buffer[BUFFER_SIZE]; + char *pstr = buffer; + + if ((superTblInfo) + && (AUTO_CREATE_SUBTBL + == superTblInfo->autoCreateTable)) { + pstr += sprintf(pstr, "INSERT INTO ? USING %s TAGS(?", + superTblInfo->sTblName); + for (int tag = 0; tag < (superTblInfo->tagCount - 1); + tag ++ ) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ") VALUES(?"); + } else { + pstr += sprintf(pstr, "INSERT INTO ? VALUES(?"); + } - char buffer[3000]; - char *pstr = buffer; - pstr += sprintf(pstr, "INSERT INTO ? values(?"); + for (int col = 0; col < columnCount; col ++) { + pstr += sprintf(pstr, ",?"); + } + pstr += sprintf(pstr, ")"); - for (int col = 0; col < columnCount; col ++) { - pstr += sprintf(pstr, ",?"); + debugPrint("%s() LN%d, buffer: %s", __func__, __LINE__, buffer); + int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); + if (ret != 0){ + errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n", + ret, taos_stmt_errstr(pThreadInfo->stmt)); + free(pids); + free(infos); + exit(-1); + } + } +#endif + } else { + pThreadInfo->taos = NULL; } - pstr += sprintf(pstr, ")"); - int ret = taos_stmt_prepare(pThreadInfo->stmt, buffer, 0); - if (ret != 0){ - errorPrint("failed to execute taos_stmt_prepare. return 0x%x. reason: %s\n", - ret, taos_errstr(NULL)); - free(pids); - free(infos); - exit(-1); + /* if ((NULL == superTblInfo) + || (0 == superTblInfo->multiThreadWriteOneTbl)) { + */ + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = iend_table_to = i < b ? tableFrom + a : tableFrom + a - 1; + tableFrom = pThreadInfo->end_table_to + 1; + /* } else { + pThreadInfo->start_table_from = 0; + pThreadInfo->ntables = superTblInfo->childTblCount; + pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); + } + */ + tsem_init(&(pThreadInfo->lock_sem), 0, 0); + if (ASYNC_MODE == g_Dbs.asyncMode) { + pthread_create(pids + i, NULL, asyncWrite, pThreadInfo); + } else { + pthread_create(pids + i, NULL, syncWrite, pThreadInfo); } - } -#endif - } else { - pThreadInfo->taos = NULL; - } - -/* if ((NULL == superTblInfo) - || (0 == superTblInfo->multiThreadWriteOneTbl)) { - */ - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = iend_table_to = i < b ? 
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; -/* } else { - pThreadInfo->start_table_from = 0; - pThreadInfo->ntables = superTblInfo->childTblCount; - pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint(); - } -*/ - tsem_init(&(pThreadInfo->lock_sem), 0, 0); - if (ASYNC_MODE == g_Dbs.asyncMode) { - pthread_create(pids + i, NULL, asyncWrite, pThreadInfo); - } else { - pthread_create(pids + i, NULL, syncWrite, pThreadInfo); } - } - for (int i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } + for (int i = 0; i < threads; i++) { + pthread_join(pids[i], NULL); + } - uint64_t totalDelay = 0; - uint64_t maxDelay = 0; - uint64_t minDelay = UINT64_MAX; - uint64_t cntDelay = 1; - double avgDelay = 0; + uint64_t totalDelay = 0; + uint64_t maxDelay = 0; + uint64_t minDelay = UINT64_MAX; + uint64_t cntDelay = 1; + double avgDelay = 0; - for (int i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; + for (int i = 0; i < threads; i++) { + threadInfo *pThreadInfo = infos + i; - tsem_destroy(&(pThreadInfo->lock_sem)); + tsem_destroy(&(pThreadInfo->lock_sem)); #if STMT_IFACE_ENABLED == 1 - if (pThreadInfo->stmt) { - taos_stmt_close(pThreadInfo->stmt); - } + if (pThreadInfo->stmt) { + taos_stmt_close(pThreadInfo->stmt); + } #endif - tsem_destroy(&(pThreadInfo->lock_sem)); - taos_close(pThreadInfo->taos); + tsem_destroy(&(pThreadInfo->lock_sem)); + taos_close(pThreadInfo->taos); - debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->threadID, pThreadInfo->totalInsertRows, - pThreadInfo->totalAffectedRows); - if (superTblInfo) { - superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; - superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows; - } else { - g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; - g_args.totalInsertRows += pThreadInfo->totalInsertRows; + debugPrint("%s() LN%d, [%d] totalInsert=%"PRIu64" totalAffected=%"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->threadID, pThreadInfo->totalInsertRows, + pThreadInfo->totalAffectedRows); + if (superTblInfo) { + superTblInfo->totalAffectedRows += pThreadInfo->totalAffectedRows; + superTblInfo->totalInsertRows += pThreadInfo->totalInsertRows; + } else { + g_args.totalAffectedRows += pThreadInfo->totalAffectedRows; + g_args.totalInsertRows += pThreadInfo->totalInsertRows; + } + + totalDelay += pThreadInfo->totalDelay; + cntDelay += pThreadInfo->cntDelay; + if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; + if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; } + cntDelay -= 1; - totalDelay += pThreadInfo->totalDelay; - cntDelay += pThreadInfo->cntDelay; - if (pThreadInfo->maxDelay > maxDelay) maxDelay = pThreadInfo->maxDelay; - if (pThreadInfo->minDelay < minDelay) minDelay = pThreadInfo->minDelay; - } - cntDelay -= 1; + if (cntDelay == 0) cntDelay = 1; + avgDelay = (double)totalDelay / cntDelay; - if (cntDelay == 0) cntDelay = 1; - avgDelay = (double)totalDelay / cntDelay; + int64_t end = taosGetTimestampMs(); + int64_t t = end - start; - int64_t end = taosGetTimestampMs(); - int64_t t = end - start; + double tInMs = t/1000.0; - if (superTblInfo) { - fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. 
%.2f records/second\n\n", - t / 1000.0, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, - (double)superTblInfo->totalInsertRows / (t / 1000.0)); + if (superTblInfo) { + fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", + tInMs, superTblInfo->totalInsertRows, + superTblInfo->totalAffectedRows, + threads, db_name, superTblInfo->sTblName, + (tInMs)? + (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. %.2f records/second\n\n", + tInMs, superTblInfo->totalInsertRows, + superTblInfo->totalAffectedRows, + threads, db_name, superTblInfo->sTblName, + (tInMs)? + (double)(superTblInfo->totalInsertRows/tInMs):FLT_MAX); + } + } else { + fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", + tInMs, g_args.totalInsertRows, + g_args.totalAffectedRows, + threads, db_name, + (tInMs)? + (double)(g_args.totalInsertRows/tInMs):FLT_MAX); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", + tInMs, g_args.totalInsertRows, + g_args.totalAffectedRows, + threads, db_name, + (tInMs)? + (double)(g_args.totalInsertRows/tInMs):FLT_MAX); + } + } + fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n", + avgDelay, maxDelay, minDelay); if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s.%s. 
%.2f records/second\n\n", - t / 1000.0, superTblInfo->totalInsertRows, - superTblInfo->totalAffectedRows, - threads, db_name, superTblInfo->sTblName, - (double)superTblInfo->totalInsertRows / (t / 1000.0)); - } - } else { - fprintf(stderr, "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", - t / 1000.0, g_args.totalInsertRows, - g_args.totalAffectedRows, - threads, db_name, - (double)g_args.totalInsertRows / (t / 1000.0)); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.2f seconds to insert rows: %"PRIu64", affected rows: %"PRIu64" with %d thread(s) into %s %.2f records/second\n\n", - t * 1000.0, g_args.totalInsertRows, - g_args.totalAffectedRows, - threads, db_name, - (double)g_args.totalInsertRows / (t / 1000.0)); - } - } - - fprintf(stderr, "insert delay, avg: %10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n", - avgDelay, maxDelay, minDelay); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n", - avgDelay, maxDelay, minDelay); - } - - //taos_close(taos); - - free(pids); - free(infos); + fprintf(g_fpOfInsertResult, "insert delay, avg:%10.2fms, max: %"PRIu64"ms, min: %"PRIu64"ms\n\n", + avgDelay, maxDelay, minDelay); + } + + //taos_close(taos); + + free(pids); + free(infos); } static void *readTable(void *sarg) { #if 1 - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS *taos = pThreadInfo->taos; - char command[BUFFER_SIZE] = "\0"; - uint64_t sTime = pThreadInfo->start_time; - char *tb_prefix = pThreadInfo->tb_prefix; - FILE *fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); - return NULL; - } - - int64_t num_of_DPT; -/* if (pThreadInfo->superTblInfo) { - num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table; - } else { - */ - num_of_DPT = g_args.num_of_DPT; -// } - - int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; - int64_t totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; - - int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; - if (!do_aggreFunc) { - printf("\nThe first field is either Binary or Bool. 
Aggregation functions are not supported.\n"); - } - printf("%"PRId64" records:\n", totalData); - fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); - - for (int j = 0; j < n; j++) { - double totalT = 0; - uint64_t count = 0; - for (int64_t i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64, - aggreFunc[j], tb_prefix, i, sTime); - - double t = taosGetTimestampMs(); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); - - if (code != 0) { - errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - fclose(fp); + threadInfo *pThreadInfo = (threadInfo *)sarg; + TAOS *taos = pThreadInfo->taos; + setThreadName("readTable"); + char command[BUFFER_SIZE] = "\0"; + uint64_t sTime = pThreadInfo->start_time; + char *tb_prefix = pThreadInfo->tb_prefix; + FILE *fp = fopen(pThreadInfo->filePath, "a"); + if (NULL == fp) { + errorPrint( "fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); return NULL; - } + } - while(taos_fetch_row(pSql) != NULL) { - count++; - } + int64_t num_of_DPT; + /* if (pThreadInfo->superTblInfo) { + num_of_DPT = pThreadInfo->superTblInfo->insertRows; // nrecords_per_table; + } else { + */ + num_of_DPT = g_args.num_of_DPT; + // } + + int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; + int64_t totalData = num_of_DPT * num_of_tables; + bool do_aggreFunc = g_Dbs.do_aggreFunc; + + int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; + if (!do_aggreFunc) { + printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); + } + printf("%"PRId64" records:\n", totalData); + fprintf(fp, "| QFunctions | QRecords | QSpeed(R/s) | QLatency(ms) |\n"); + + for (int j = 0; j < n; j++) { + double totalT = 0; + uint64_t count = 0; + for (int64_t i = 0; i < num_of_tables; i++) { + sprintf(command, "select %s from %s%"PRId64" where ts>= %" PRIu64, + aggreFunc[j], tb_prefix, i, sTime); + + double t = taosGetTimestampMs(); + TAOS_RES *pSql = taos_query(taos, command); + int32_t code = taos_errno(pSql); + + if (code != 0) { + errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + fclose(fp); + return NULL; + } + + while(taos_fetch_row(pSql) != NULL) { + count++; + } - t = taosGetTimestampMs() - t; - totalT += t; + t = taosGetTimestampMs() - t; + totalT += t; - taos_free_result(pSql); - } + taos_free_result(pSql); + } - fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", - aggreFunc[j][0] == '*' ? " * " : aggreFunc[j], totalData, - (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); - printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT * 1000); - } - fprintf(fp, "\n"); - fclose(fp); + fprintf(fp, "|%10s | %"PRId64" | %12.2f | %10.2f |\n", + aggreFunc[j][0] == '*' ? 
" * " : aggreFunc[j], totalData, + (double)(num_of_tables * num_of_DPT) / totalT, totalT * 1000); + printf("select %10s took %.6f second(s)\n", aggreFunc[j], totalT * 1000); + } + fprintf(fp, "\n"); + fclose(fp); #endif - return NULL; + return NULL; } static void *readMetric(void *sarg) { #if 1 - threadInfo *pThreadInfo = (threadInfo *)sarg; - TAOS *taos = pThreadInfo->taos; - char command[BUFFER_SIZE] = "\0"; - FILE *fp = fopen(pThreadInfo->filePath, "a"); - if (NULL == fp) { - printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); - return NULL; - } + threadInfo *pThreadInfo = (threadInfo *)sarg; + TAOS *taos = pThreadInfo->taos; + setThreadName("readMetric"); + char command[BUFFER_SIZE] = "\0"; + FILE *fp = fopen(pThreadInfo->filePath, "a"); + if (NULL == fp) { + printf("fopen %s fail, reason:%s.\n", pThreadInfo->filePath, strerror(errno)); + return NULL; + } - int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows; - int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; - int64_t totalData = num_of_DPT * num_of_tables; - bool do_aggreFunc = g_Dbs.do_aggreFunc; + int64_t num_of_DPT = pThreadInfo->superTblInfo->insertRows; + int64_t num_of_tables = pThreadInfo->ntables; // rinfo->end_table_to - rinfo->start_table_from + 1; + int64_t totalData = num_of_DPT * num_of_tables; + bool do_aggreFunc = g_Dbs.do_aggreFunc; - int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; - if (!do_aggreFunc) { - printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); - } - printf("%"PRId64" records:\n", totalData); - fprintf(fp, "Querying On %"PRId64" records:\n", totalData); + int n = do_aggreFunc ? (sizeof(aggreFunc) / sizeof(aggreFunc[0])) : 2; + if (!do_aggreFunc) { + printf("\nThe first field is either Binary or Bool. Aggregation functions are not supported.\n"); + } + printf("%"PRId64" records:\n", totalData); + fprintf(fp, "Querying On %"PRId64" records:\n", totalData); - for (int j = 0; j < n; j++) { - char condition[COND_BUF_LEN] = "\0"; - char tempS[64] = "\0"; + for (int j = 0; j < n; j++) { + char condition[COND_BUF_LEN] = "\0"; + char tempS[64] = "\0"; - int64_t m = 10 < num_of_tables ? 10 : num_of_tables; + int64_t m = 10 < num_of_tables ? 
10 : num_of_tables; - for (int64_t i = 1; i <= m; i++) { - if (i == 1) { - sprintf(tempS, "t1 = %"PRId64"", i); - } else { - sprintf(tempS, " or t1 = %"PRId64" ", i); - } - strncat(condition, tempS, COND_BUF_LEN - 1); + for (int64_t i = 1; i <= m; i++) { + if (i == 1) { + sprintf(tempS, "t1 = %"PRId64"", i); + } else { + sprintf(tempS, " or t1 = %"PRId64" ", i); + } + strncat(condition, tempS, COND_BUF_LEN - 1); - sprintf(command, "select %s from meters where %s", aggreFunc[j], condition); + sprintf(command, "select %s from meters where %s", aggreFunc[j], condition); - printf("Where condition: %s\n", condition); - fprintf(fp, "%s\n", command); + printf("Where condition: %s\n", condition); + fprintf(fp, "%s\n", command); - double t = taosGetTimestampMs(); + double t = taosGetTimestampMs(); - TAOS_RES *pSql = taos_query(taos, command); - int32_t code = taos_errno(pSql); + TAOS_RES *pSql = taos_query(taos, command); + int32_t code = taos_errno(pSql); - if (code != 0) { - errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); - taos_free_result(pSql); - taos_close(taos); - fclose(fp); - return NULL; - } - int count = 0; - while(taos_fetch_row(pSql) != NULL) { - count++; - } - t = taosGetTimestampMs() - t; + if (code != 0) { + errorPrint( "Failed to query:%s\n", taos_errstr(pSql)); + taos_free_result(pSql); + taos_close(taos); + fclose(fp); + return NULL; + } + int count = 0; + while(taos_fetch_row(pSql) != NULL) { + count++; + } + t = taosGetTimestampMs() - t; - fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", - num_of_tables * num_of_DPT / (t * 1000.0), t); - printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t * 1000.0); + fprintf(fp, "| Speed: %12.2f(per s) | Latency: %.4f(ms) |\n", + num_of_tables * num_of_DPT / (t * 1000.0), t); + printf("select %10s took %.6f second(s)\n\n", aggreFunc[j], t * 1000.0); - taos_free_result(pSql); + taos_free_result(pSql); + } + fprintf(fp, "\n"); } - fprintf(fp, "\n"); - } - fclose(fp); + fclose(fp); #endif - return NULL; + return NULL; } static void prompt() { - if (!g_args.answer_yes) { - printf(" Press enter key to continue or Ctrl-C to stop\n\n"); - (void)getchar(); - } + if (!g_args.answer_yes) { + printf(" Press enter key to continue or Ctrl-C to stop\n\n"); + (void)getchar(); + } } static int insertTestProcess() { - setupForAnsiEscape(); - int ret = printfInsertMeta(); - resetAfterAnsiEscape(); + setupForAnsiEscape(); + int ret = printfInsertMeta(); + resetAfterAnsiEscape(); - if (ret == -1) - exit(EXIT_FAILURE); + if (ret == -1) + exit(EXIT_FAILURE); - debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); - g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); - if (NULL == g_fpOfInsertResult) { - errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile); - return -1; - } + debugPrint("%d result file: %s\n", __LINE__, g_Dbs.resultFile); + g_fpOfInsertResult = fopen(g_Dbs.resultFile, "a"); + if (NULL == g_fpOfInsertResult) { + errorPrint( "Failed to open %s for save result\n", g_Dbs.resultFile); + return -1; + } - if (g_fpOfInsertResult) - printfInsertMetaToFile(g_fpOfInsertResult); + if (g_fpOfInsertResult) + printfInsertMetaToFile(g_fpOfInsertResult); - prompt(); + prompt(); - init_rand_data(); + init_rand_data(); - // create database and super tables - if(createDatabasesAndStables() != 0) { - if (g_fpOfInsertResult) - fclose(g_fpOfInsertResult); - return -1; - } + // create database and super tables + if(createDatabasesAndStables() != 0) { + if (g_fpOfInsertResult) + fclose(g_fpOfInsertResult); + return -1; 
+ } - // pretreatement - prepareSampleData(); + // pretreatement + if (prepareSampleData() != 0) { + if (g_fpOfInsertResult) + fclose(g_fpOfInsertResult); + return -1; + } - double start; - double end; + double start; + double end; - // create child tables - start = taosGetTimestampMs(); - createChildTables(); - end = taosGetTimestampMs(); + // create child tables + start = taosGetTimestampMs(); + createChildTables(); + end = taosGetTimestampMs(); - if (g_totalChildTables > 0) { - fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); - if (g_fpOfInsertResult) { - fprintf(g_fpOfInsertResult, - "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", - (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + if (g_totalChildTables > 0) { + fprintf(stderr, "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", + (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + if (g_fpOfInsertResult) { + fprintf(g_fpOfInsertResult, + "Spent %.4f seconds to create %"PRId64" tables with %d thread(s)\n\n", + (end - start)/1000.0, g_totalChildTables, g_Dbs.threadCountByCreateTbl); + } } - } - // create sub threads for inserting data - //start = taosGetTimestampMs(); - for (int i = 0; i < g_Dbs.dbCount; i++) { - if (g_Dbs.use_metric) { - if (g_Dbs.db[i].superTblCount > 0) { - for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // create sub threads for inserting data + //start = taosGetTimestampMs(); + for (int i = 0; i < g_Dbs.dbCount; i++) { + if (g_Dbs.use_metric) { + if (g_Dbs.db[i].superTblCount > 0) { + for (uint64_t j = 0; j < g_Dbs.db[i].superTblCount; j++) { - SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; + SSuperTable* superTblInfo = &g_Dbs.db[i].superTbls[j]; - if (superTblInfo && (superTblInfo->insertRows > 0)) { + if (superTblInfo && (superTblInfo->insertRows > 0)) { + startMultiThreadInsertData( + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + superTblInfo); + } + } + } + } else { startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, - superTblInfo); - } + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, + NULL); } - } - } else { - startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, - NULL); - } - } - //end = taosGetTimestampMs(); - - //int64_t totalInsertRows = 0; - //int64_t totalAffectedRows = 0; - //for (int i = 0; i < g_Dbs.dbCount; i++) { - // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows; - // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; - //} - //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalInsertRows, totalAffectedRows, g_Dbs.threadCount); - postFreeResource(); - - return 0; + } + //end = taosGetTimestampMs(); + + //int64_t totalInsertRows = 0; + //int64_t totalAffectedRows = 0; + //for (int i = 0; i < g_Dbs.dbCount; i++) { + // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { + // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows; + // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; + //} + //printf("Spent %.4f seconds to insert rows: %"PRId64", affected rows: %"PRId64" with %d thread(s)\n\n", end - start, totalInsertRows, totalAffectedRows, 
g_Dbs.threadCount); + postFreeResource(); + + return 0; } static void *specifiedTableQuery(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; + threadInfo *pThreadInfo = (threadInfo *)sarg; - if (pThreadInfo->taos == NULL) { - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; - } else { - pThreadInfo->taos = taos; + setThreadName("specTableQuery"); + + if (pThreadInfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + pThreadInfo->taos = taos; + } } - } - char sqlStr[MAX_DB_NAME_SIZE + 5]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint( "use database %s failed!\n\n", + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); + errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); - return NULL; - } + return NULL; + } - uint64_t st = 0; - uint64_t et = 0; + uint64_t st = 0; + uint64_t et = 0; - uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; + uint64_t queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; - uint64_t totalQueried = 0; - uint64_t lastPrintTime = taosGetTimestampMs(); - uint64_t startTs = taosGetTimestampMs(); + uint64_t totalQueried = 0; + uint64_t lastPrintTime = taosGetTimestampMs(); + uint64_t startTs = taosGetTimestampMs(); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { + sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - } - - while(queryTimes --) { - if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < - (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { - taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms } - st = taosGetTimestampMs(); + while(queryTimes --) { + if (g_queryInfo.specifiedQueryInfo.queryInterval && (et - st) < + (int64_t)g_queryInfo.specifiedQueryInfo.queryInterval) { + taosMsleep(g_queryInfo.specifiedQueryInfo.queryInterval - (et - st)); // ms + } + + st = taosGetTimestampMs(); - selectAndGetResult(pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); + selectAndGetResult(pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq]); - et = taosGetTimestampMs(); - printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", - taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0); + et = taosGetTimestampMs(); + printf("=thread[%"PRId64"] use %s complete one sql, Spent %10.3f s\n", + taosGetSelfPthreadId(), g_queryInfo.queryMode, (et - st)/1000.0); - totalQueried ++; - g_queryInfo.specifiedQueryInfo.totalQueried ++; + totalQueried ++; + g_queryInfo.specifiedQueryInfo.totalQueried ++; - uint64_t currentPrintTime = taosGetTimestampMs(); - uint64_t endTs = 
taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n", - __func__, __LINE__, endTs, startTs); - printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", + uint64_t currentPrintTime = taosGetTimestampMs(); + uint64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + debugPrint("%s() LN%d, endTs=%"PRIu64"ms, startTs=%"PRIu64"ms\n", + __func__, __LINE__, endTs, startTs); + printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.6f\n", pThreadInfo->threadID, totalQueried, (double)(totalQueried/((endTs-startTs)/1000.0))); - lastPrintTime = currentPrintTime; + lastPrintTime = currentPrintTime; + } } - } - return NULL; + return NULL; } static void replaceChildTblName(char* inSql, char* outSql, int tblIndex) { - char sourceString[32] = "xxxx"; - char subTblName[MAX_TB_NAME_SIZE*3]; - sprintf(subTblName, "%s.%s", - g_queryInfo.dbName, - g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); + char sourceString[32] = "xxxx"; + char subTblName[TSDB_TABLE_NAME_LEN]; + sprintf(subTblName, "%s.%s", + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); - //printf("inSql: %s\n", inSql); + //printf("inSql: %s\n", inSql); - char* pos = strstr(inSql, sourceString); - if (0 == pos) { - return; - } - - tstrncpy(outSql, inSql, pos - inSql + 1); - //printf("1: %s\n", outSql); - strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); - //printf("2: %s\n", outSql); - strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); - //printf("3: %s\n", outSql); + char* pos = strstr(inSql, sourceString); + if (0 == pos) { + return; + } + + tstrncpy(outSql, inSql, pos - inSql + 1); + //printf("1: %s\n", outSql); + strncat(outSql, subTblName, MAX_QUERY_SQL_LENGTH - 1); + //printf("2: %s\n", outSql); + strncat(outSql, pos+strlen(sourceString), MAX_QUERY_SQL_LENGTH - 1); + //printf("3: %s\n", outSql); } static void *superTableQuery(void *sarg) { - char sqlstr[MAX_QUERY_SQL_LENGTH]; - threadInfo *pThreadInfo = (threadInfo *)sarg; + char sqlstr[MAX_QUERY_SQL_LENGTH]; + threadInfo *pThreadInfo = (threadInfo *)sarg; - if (pThreadInfo->taos == NULL) { - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); - if (taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; - } else { - pThreadInfo->taos = taos; - } - } + setThreadName("superTableQuery"); - uint64_t st = 0; - uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; + if (pThreadInfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + pThreadInfo->taos = taos; + } + } - uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes; - uint64_t totalQueried = 0; - uint64_t startTs = taosGetTimestampMs(); + uint64_t st = 0; + uint64_t et = (int64_t)g_queryInfo.superQueryInfo.queryInterval; - uint64_t lastPrintTime = taosGetTimestampMs(); - while(queryTimes --) { - if (g_queryInfo.superQueryInfo.queryInterval - && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { - taosMsleep(g_queryInfo.superQueryInfo.queryInterval 
- (et - st)); // ms - //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); - } + uint64_t queryTimes = g_queryInfo.superQueryInfo.queryTimes; + uint64_t totalQueried = 0; + uint64_t startTs = taosGetTimestampMs(); - st = taosGetTimestampMs(); - for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { - memset(sqlstr,0,sizeof(sqlstr)); - replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); - if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[j], - pThreadInfo->threadID); + uint64_t lastPrintTime = taosGetTimestampMs(); + while(queryTimes --) { + if (g_queryInfo.superQueryInfo.queryInterval + && (et - st) < (int64_t)g_queryInfo.superQueryInfo.queryInterval) { + taosMsleep(g_queryInfo.superQueryInfo.queryInterval - (et - st)); // ms + //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, pThreadInfo->start_table_from, pThreadInfo->end_table_to); } - selectAndGetResult(pThreadInfo, sqlstr); - totalQueried++; - g_queryInfo.superQueryInfo.totalQueried ++; - - int64_t currentPrintTime = taosGetTimestampMs(); - int64_t endTs = taosGetTimestampMs(); - if (currentPrintTime - lastPrintTime > 30*1000) { - printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n", - pThreadInfo->threadID, - totalQueried, - (double)(totalQueried/((endTs-startTs)/1000.0))); - lastPrintTime = currentPrintTime; + st = taosGetTimestampMs(); + for (int i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { + for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { + memset(sqlstr,0,sizeof(sqlstr)); + replaceChildTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); + if (g_queryInfo.superQueryInfo.result[j][0] != '\0') { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.superQueryInfo.result[j], + pThreadInfo->threadID); + } + selectAndGetResult(pThreadInfo, sqlstr); + + totalQueried++; + g_queryInfo.superQueryInfo.totalQueried ++; + + int64_t currentPrintTime = taosGetTimestampMs(); + int64_t endTs = taosGetTimestampMs(); + if (currentPrintTime - lastPrintTime > 30*1000) { + printf("thread[%d] has currently completed queries: %"PRIu64", QPS: %10.3f\n", + pThreadInfo->threadID, + totalQueried, + (double)(totalQueried/((endTs-startTs)/1000.0))); + lastPrintTime = currentPrintTime; + } + } } - } + et = taosGetTimestampMs(); + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n", + taosGetSelfPthreadId(), + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, + (double)(et - st)/1000.0); } - et = taosGetTimestampMs(); - printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%"PRIu64" - %"PRIu64"] once queries duration:%.4fs\n\n", - taosGetSelfPthreadId(), - pThreadInfo->start_table_from, - pThreadInfo->end_table_to, - (double)(et - st)/1000.0); - } - return NULL; + return NULL; } static int queryTestProcess() { - setupForAnsiEscape(); - printfQueryMeta(); - resetAfterAnsiEscape(); - - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, - g_queryInfo.port); - if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", - 
taos_errstr(NULL)); - exit(-1); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, - g_queryInfo.dbName, - g_queryInfo.superQueryInfo.sTblName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } + setupForAnsiEscape(); + printfQueryMeta(); + resetAfterAnsiEscape(); + + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + exit(-1); + } - prompt(); + if (0 != g_queryInfo.superQueryInfo.sqlCount) { + getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.sTblName, + &g_queryInfo.superQueryInfo.childTblName, + &g_queryInfo.superQueryInfo.childTblCount); + } - if (g_args.debug_print || g_args.verbose_print) { - printfQuerySystemInfo(taos); - } + prompt(); - if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { - if (convertHostToServAddr( - g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) - exit(-1); - } + if (g_args.debug_print || g_args.verbose_print) { + printfQuerySystemInfo(taos); + } - pthread_t *pids = NULL; - threadInfo *infos = NULL; - //==== create sub threads for query from specify table - int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; - uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; + if (0 == strncasecmp(g_queryInfo.queryMode, "rest", strlen("rest"))) { + if (convertHostToServAddr( + g_queryInfo.host, g_queryInfo.port, &g_queryInfo.serv_addr) != 0) + exit(-1); + } - uint64_t startTs = taosGetTimestampMs(); + pthread_t *pids = NULL; + threadInfo *infos = NULL; + //==== create sub threads for query from specify table + int nConcurrent = g_queryInfo.specifiedQueryInfo.concurrent; + uint64_t nSqlCount = g_queryInfo.specifiedQueryInfo.sqlCount; - if ((nSqlCount > 0) && (nConcurrent > 0)) { + uint64_t startTs = taosGetTimestampMs(); - pids = malloc(nConcurrent * nSqlCount * sizeof(pthread_t)); - infos = malloc(nConcurrent * nSqlCount * sizeof(threadInfo)); + if ((nSqlCount > 0) && (nConcurrent > 0)) { - if ((NULL == pids) || (NULL == infos)) { - taos_close(taos); - ERROR_EXIT("memory allocation failed for create threads\n"); - } + pids = calloc(1, nConcurrent * nSqlCount * sizeof(pthread_t)); + infos = calloc(1, nConcurrent * nSqlCount * sizeof(threadInfo)); - for (uint64_t i = 0; i < nSqlCount; i++) { - for (int j = 0; j < nConcurrent; j++) { - uint64_t seq = i * nConcurrent + j; - threadInfo *pThreadInfo = infos + seq; - pThreadInfo->threadID = seq; - pThreadInfo->querySeq = i; + if ((NULL == pids) || (NULL == infos)) { + taos_close(taos); + ERROR_EXIT("memory allocation failed for create threads\n"); + } - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + for (uint64_t i = 0; i < nSqlCount; i++) { + for (int j = 0; j < nConcurrent; j++) { + uint64_t seq = i * nConcurrent + j; + threadInfo *pThreadInfo = infos + seq; + pThreadInfo->threadID = seq; + pThreadInfo->querySeq = i; - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(taos); - free(infos); - free(pids); - errorPrint( "use database %s failed!\n\n", - g_queryInfo.dbName); - return -1; + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "USE %s", g_queryInfo.dbName); + if (0 != 
queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) {
+                        taos_close(taos);
+                        free(infos);
+                        free(pids);
+                        errorPrint( "use database %s failed!\n\n",
+                                g_queryInfo.dbName);
+                        return -1;
+                    }
                }
-      }
-      pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
+                pThreadInfo->taos = NULL;// TODO: workaround to use separate taos connection;
-      pthread_create(pids + seq, NULL, specifiedTableQuery,
-          pThreadInfo);
+                pthread_create(pids + seq, NULL, specifiedTableQuery,
+                        pThreadInfo);
+            }
        }
+    } else {
+        g_queryInfo.specifiedQueryInfo.concurrent = 0;
    }
-  } else {
-    g_queryInfo.specifiedQueryInfo.concurrent = 0;
-  }
-  taos_close(taos);
+    taos_close(taos);
-  pthread_t *pidsOfSub = NULL;
-  threadInfo *infosOfSub = NULL;
-  //==== create sub threads for query from all sub table of the super table
-  if ((g_queryInfo.superQueryInfo.sqlCount > 0)
-          && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
-    pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
-    infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
+    pthread_t *pidsOfSub = NULL;
+    threadInfo *infosOfSub = NULL;
+    //==== create sub threads for query from all sub table of the super table
+    if ((g_queryInfo.superQueryInfo.sqlCount > 0)
+            && (g_queryInfo.superQueryInfo.threadCnt > 0)) {
+        pidsOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t));
+        infosOfSub = calloc(1, g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo));
-    if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
-      free(infos);
-      free(pids);
+        if ((NULL == pidsOfSub) || (NULL == infosOfSub)) {
+            free(infos);
+            free(pids);
-      ERROR_EXIT("memory allocation failed for create threads\n");
-    }
+            ERROR_EXIT("memory allocation failed for create threads\n");
+        }
-    int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
-    int threads = g_queryInfo.superQueryInfo.threadCnt;
+        int64_t ntables = g_queryInfo.superQueryInfo.childTblCount;
+        int threads = g_queryInfo.superQueryInfo.threadCnt;
-    int64_t a = ntables / threads;
-    if (a < 1) {
-      threads = ntables;
-      a = 1;
-    }
+        int64_t a = ntables / threads;
+        if (a < 1) {
+            threads = ntables;
+            a = 1;
+        }
-    int64_t b = 0;
-    if (threads != 0) {
-      b = ntables % threads;
-    }
+        int64_t b = 0;
+        if (threads != 0) {
+            b = ntables % threads;
+        }
-    uint64_t tableFrom = 0;
-    for (int i = 0; i < threads; i++) {
-      threadInfo *pThreadInfo = infosOfSub + i;
-      pThreadInfo->threadID = i;
+        uint64_t tableFrom = 0;
+        for (int i = 0; i < threads; i++) {
+            threadInfo *pThreadInfo = infosOfSub + i;
+            pThreadInfo->threadID = i;
+
+            pThreadInfo->start_table_from = tableFrom;
+            pThreadInfo->ntables = i < b ? a + 1 : a;
+            pThreadInfo->end_table_to = i < b ? tableFrom + a : tableFrom + a - 1;
+            tableFrom = pThreadInfo->end_table_to + 1;
+            pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection;
+            pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo);
+        }
-      pThreadInfo->start_table_from = tableFrom;
-      pThreadInfo->ntables = i < b ? a + 1 : a;
-      pThreadInfo->end_table_to = i < b ?
tableFrom + a : tableFrom + a - 1; - tableFrom = pThreadInfo->end_table_to + 1; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfSub + i, NULL, superTableQuery, pThreadInfo); + g_queryInfo.superQueryInfo.threadCnt = threads; + } else { + g_queryInfo.superQueryInfo.threadCnt = 0; } - g_queryInfo.superQueryInfo.threadCnt = threads; - } else { - g_queryInfo.superQueryInfo.threadCnt = 0; - } - - if ((nSqlCount > 0) && (nConcurrent > 0)) { - for (int i = 0; i < nConcurrent; i++) { - for (int j = 0; j < nSqlCount; j++) { - pthread_join(pids[i * nSqlCount + j], NULL); - } + if ((nSqlCount > 0) && (nConcurrent > 0)) { + for (int i = 0; i < nConcurrent; i++) { + for (int j = 0; j < nSqlCount; j++) { + pthread_join(pids[i * nSqlCount + j], NULL); + } + } } - } - tmfree((char*)pids); - tmfree((char*)infos); + tmfree((char*)pids); + tmfree((char*)infos); - for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - } + for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); + } - tmfree((char*)pidsOfSub); - tmfree((char*)infosOfSub); + tmfree((char*)pidsOfSub); + tmfree((char*)infosOfSub); -// taos_close(taos);// TODO: workaround to use separate taos connection; - uint64_t endTs = taosGetTimestampMs(); + // taos_close(taos);// TODO: workaround to use separate taos connection; + uint64_t endTs = taosGetTimestampMs(); - uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + - g_queryInfo.superQueryInfo.totalQueried; + uint64_t totalQueried = g_queryInfo.specifiedQueryInfo.totalQueried + + g_queryInfo.superQueryInfo.totalQueried; - fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n", - totalQueried, - (double)(totalQueried/((endTs-startTs)/1000.0))); - return 0; + fprintf(stderr, "==== completed total queries: %"PRIu64", the QPS of all threads: %10.3f====\n", + totalQueried, + (double)(totalQueried/((endTs-startTs)/1000.0))); + return 0; } static void stable_sub_callback( TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } + if (res == NULL || taos_errno(res) != 0) { + errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + __func__, __LINE__, code, taos_errstr(res)); + return; + } - if (param) - fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. + if (param) + fetchResult(res, (threadInfo *)param); + // tao_unscribe() will free result. } static void specified_sub_callback( TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { - if (res == NULL || taos_errno(res) != 0) { - errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", - __func__, __LINE__, code, taos_errstr(res)); - return; - } + if (res == NULL || taos_errno(res) != 0) { + errorPrint("%s() LN%d, failed to subscribe result, code:%d, reason:%s\n", + __func__, __LINE__, code, taos_errstr(res)); + return; + } - if (param) - fetchResult(res, (threadInfo *)param); - // tao_unscribe() will free result. + if (param) + fetchResult(res, (threadInfo *)param); + // tao_unscribe() will free result. 
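/*
 * Illustrative sketch, not part of this patch: both queryTestProcess() above
 * and subscribeTestProcess() below split ntables child tables across worker
 * threads the same way: each thread gets a = ntables / threads tables, and the
 * first b = ntables % threads threads take one extra. The standalone program
 * below only demonstrates that arithmetic; the variable names mirror the patch
 * but nothing here is called by taosdemo.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int64_t ntables = 10;   /* e.g. g_queryInfo.superQueryInfo.childTblCount */
    int     threads = 4;    /* e.g. g_queryInfo.superQueryInfo.threadCnt */

    int64_t a = ntables / threads;          /* base share per thread */
    if (a < 1) {
        threads = (int)ntables;
        a = 1;
    }
    int64_t b = (threads != 0) ? ntables % threads : 0;   /* leftover tables */

    uint64_t tableFrom = 0;
    for (int i = 0; i < threads; i++) {
        uint64_t end_table_to = (i < b) ? tableFrom + a : tableFrom + a - 1;
        printf("thread %d handles tables [%" PRIu64 ", %" PRIu64 "]\n",
                i, tableFrom, end_table_to);
        tableFrom = end_table_to + 1;       /* next thread starts after this range */
    }
    return 0;
}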
} static TAOS_SUB* subscribeImpl( @@ -7203,784 +7702,803 @@ static TAOS_SUB* subscribeImpl( threadInfo *pThreadInfo, char *sql, char* topic, bool restart, uint64_t interval) { - TAOS_SUB* tsub = NULL; - - if ((SPECIFIED_CLASS == class) - && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { - tsub = taos_subscribe( - pThreadInfo->taos, - restart, - topic, sql, specified_sub_callback, (void*)pThreadInfo, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - } else if ((STABLE_CLASS == class) - && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { - tsub = taos_subscribe( - pThreadInfo->taos, - restart, - topic, sql, stable_sub_callback, (void*)pThreadInfo, - g_queryInfo.superQueryInfo.subscribeInterval); - } else { - tsub = taos_subscribe( - pThreadInfo->taos, - restart, - topic, sql, NULL, NULL, interval); - } - - if (tsub == NULL) { - printf("failed to create subscription. topic:%s, sql:%s\n", topic, sql); - return NULL; - } + TAOS_SUB* tsub = NULL; + + if ((SPECIFIED_CLASS == class) + && (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode)) { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, specified_sub_callback, (void*)pThreadInfo, + g_queryInfo.specifiedQueryInfo.subscribeInterval); + } else if ((STABLE_CLASS == class) + && (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode)) { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, stable_sub_callback, (void*)pThreadInfo, + g_queryInfo.superQueryInfo.subscribeInterval); + } else { + tsub = taos_subscribe( + pThreadInfo->taos, + restart, + topic, sql, NULL, NULL, interval); + } - return tsub; + if (tsub == NULL) { + errorPrint("failed to create subscription. topic:%s, sql:%s\n", topic, sql); + return NULL; + } + + return tsub; } static void *superSubscribe(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; - char subSqlstr[MAX_QUERY_SQL_LENGTH]; - TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; - uint64_t tsubSeq; - - if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { - errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", - pThreadInfo->ntables, - MAX_QUERY_SQL_COUNT); - exit(-1); - } - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, - g_queryInfo.port); + threadInfo *pThreadInfo = (threadInfo *)sarg; + char subSqlstr[MAX_QUERY_SQL_LENGTH]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; + uint64_t tsubSeq; + + setThreadName("superSub"); + + if (pThreadInfo->ntables > MAX_QUERY_SQL_COUNT) { + errorPrint("The table number(%"PRId64") of the thread is more than max query sql count: %d\n", + pThreadInfo->ntables, MAX_QUERY_SQL_COUNT); + exit(-1); + } + if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; + pThreadInfo->taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (pThreadInfo->taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + return NULL; + } } - } - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - errorPrint( "use database %s failed!\n\n", - g_queryInfo.dbName); - return NULL; - } - - char topic[32] = {0}; - for (uint64_t i = 
pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - - tsubSeq = i - pThreadInfo->start_table_from; - verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n", - __func__, __LINE__, - pThreadInfo->threadID, - pThreadInfo->start_table_from, - pThreadInfo->end_table_to, i); - sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", - i, pThreadInfo->querySeq); - memset(subSqlstr, 0, sizeof(subSqlstr)); - replaceChildTblName( - g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], - subSqlstr, i); - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - } - - debugPrint("%s() LN%d, [%d] subSqlstr: %s\n", - __func__, __LINE__, pThreadInfo->threadID, subSqlstr); - tsub[tsubSeq] = subscribeImpl( - STABLE_CLASS, - pThreadInfo, subSqlstr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval); - if (NULL == tsub[tsubSeq]) { + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "USE %s", g_queryInfo.dbName); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); + errorPrint( "use database %s failed!\n\n", + g_queryInfo.dbName); return NULL; - } - } + } - // start loop to consume result - int consumed[MAX_QUERY_SQL_COUNT]; - for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { - consumed[i] = 0; - } - TAOS_RES* res = NULL; + char topic[32] = {0}; + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + tsubSeq = i - pThreadInfo->start_table_from; + verbosePrint("%s() LN%d, [%d], start=%"PRId64" end=%"PRId64" i=%"PRIu64"\n", + __func__, __LINE__, + pThreadInfo->threadID, + pThreadInfo->start_table_from, + pThreadInfo->end_table_to, i); + sprintf(topic, "taosdemo-subscribe-%"PRIu64"-%"PRIu64"", + i, pThreadInfo->querySeq); + memset(subSqlstr, 0, sizeof(subSqlstr)); + replaceChildTblName( + g_queryInfo.superQueryInfo.sql[pThreadInfo->querySeq], + subSqlstr, i); + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + + verbosePrint("%s() LN%d, [%d] subSqlstr: %s\n", + __func__, __LINE__, pThreadInfo->threadID, subSqlstr); + tsub[tsubSeq] = subscribeImpl( + STABLE_CLASS, + pThreadInfo, subSqlstr, topic, + g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.superQueryInfo.subscribeInterval); + if (NULL == tsub[tsubSeq]) { + taos_close(pThreadInfo->taos); + return NULL; + } + } + + // start loop to consume result + int consumed[MAX_QUERY_SQL_COUNT]; + for (int i = 0; i < MAX_QUERY_SQL_COUNT; i++) { + consumed[i] = 0; + } + TAOS_RES* res = NULL; + + uint64_t st = 0, et = 0; - uint64_t st = 0, et = 0; + while ((g_queryInfo.superQueryInfo.endAfterConsume == -1) + || (g_queryInfo.superQueryInfo.endAfterConsume > + consumed[pThreadInfo->end_table_to + - pThreadInfo->start_table_from])) { + + verbosePrint("super endAfterConsume: %d, consumed: %d\n", + g_queryInfo.superQueryInfo.endAfterConsume, + consumed[pThreadInfo->end_table_to + - pThreadInfo->start_table_from]); + for (uint64_t i = pThreadInfo->start_table_from; + i <= pThreadInfo->end_table_to; i++) { + tsubSeq = i - pThreadInfo->start_table_from; + if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { + continue; + } - while ((g_queryInfo.superQueryInfo.endAfterConsume == -1) - || 
(g_queryInfo.superQueryInfo.endAfterConsume < - consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from])) { + st = taosGetTimestampMs(); + performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et)); + res = taos_consume(tsub[tsubSeq]); + et = taosGetTimestampMs(); + performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st)); + + if (res) { + if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + fetchResult(res, pThreadInfo); + } + consumed[tsubSeq] ++; + + if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) + && (consumed[tsubSeq] >= + g_queryInfo.superQueryInfo.resubAfterConsume)) { + verbosePrint("%s() LN%d, keepProgress:%d, resub super table query: %"PRIu64"\n", + __func__, __LINE__, + g_queryInfo.superQueryInfo.subscribeKeepProgress, + pThreadInfo->querySeq); + taos_unsubscribe(tsub[tsubSeq], + g_queryInfo.superQueryInfo.subscribeKeepProgress); + consumed[tsubSeq]= 0; + tsub[tsubSeq] = subscribeImpl( + STABLE_CLASS, + pThreadInfo, subSqlstr, topic, + g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.superQueryInfo.subscribeInterval + ); + if (NULL == tsub[tsubSeq]) { + taos_close(pThreadInfo->taos); + return NULL; + } + } + } + } + } + verbosePrint("%s() LN%d, super endAfterConsume: %d, consumed: %d\n", + __func__, __LINE__, + g_queryInfo.superQueryInfo.endAfterConsume, + consumed[pThreadInfo->end_table_to - pThreadInfo->start_table_from]); + taos_free_result(res); for (uint64_t i = pThreadInfo->start_table_from; i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - if (ASYNC_MODE == g_queryInfo.superQueryInfo.asyncMode) { - continue; - } - - st = taosGetTimestampMs(); - performancePrint("st: %"PRIu64" et: %"PRIu64" st-et: %"PRIu64"\n", st, et, (st - et)); - res = taos_consume(tsub[tsubSeq]); - et = taosGetTimestampMs(); - performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st)); - - if (res) { - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } - consumed[tsubSeq] ++; - - if ((g_queryInfo.superQueryInfo.resubAfterConsume != -1) - && (consumed[tsubSeq] >= - g_queryInfo.superQueryInfo.resubAfterConsume)) { - printf("keepProgress:%d, resub super table query: %"PRIu64"\n", - g_queryInfo.superQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - taos_unsubscribe(tsub[tsubSeq], - g_queryInfo.superQueryInfo.subscribeKeepProgress); - consumed[tsubSeq]= 0; - tsub[tsubSeq] = subscribeImpl( - STABLE_CLASS, - pThreadInfo, subSqlstr, topic, - g_queryInfo.superQueryInfo.subscribeRestart, - g_queryInfo.superQueryInfo.subscribeInterval - ); - if (NULL == tsub[tsubSeq]) { - taos_close(pThreadInfo->taos); - return NULL; - } - } - } + tsubSeq = i - pThreadInfo->start_table_from; + taos_unsubscribe(tsub[tsubSeq], 0); } - } - taos_free_result(res); - for (uint64_t i = pThreadInfo->start_table_from; - i <= pThreadInfo->end_table_to; i++) { - tsubSeq = i - pThreadInfo->start_table_from; - 
taos_unsubscribe(tsub[tsubSeq], 0); - } - - taos_close(pThreadInfo->taos); - return NULL; + taos_close(pThreadInfo->taos); + return NULL; } static void *specifiedSubscribe(void *sarg) { - threadInfo *pThreadInfo = (threadInfo *)sarg; -// TAOS_SUB* tsub = NULL; - - if (pThreadInfo->taos == NULL) { - pThreadInfo->taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, - g_queryInfo.port); + threadInfo *pThreadInfo = (threadInfo *)sarg; + // TAOS_SUB* tsub = NULL; + + setThreadName("specSub"); + if (pThreadInfo->taos == NULL) { - errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", - pThreadInfo->threadID, taos_errstr(NULL)); - return NULL; + pThreadInfo->taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (pThreadInfo->taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + pThreadInfo->threadID, taos_errstr(NULL)); + return NULL; + } } - } - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { - taos_close(pThreadInfo->taos); - return NULL; - } - - sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - "taosdemo-subscribe-%"PRIu64"-%d", - pThreadInfo->querySeq, - pThreadInfo->threadID); - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { - sprintf(pThreadInfo->filePath, "%s-%d", + char sqlStr[TSDB_DB_NAME_LEN + 5]; + sprintf(sqlStr, "USE %s", g_queryInfo.dbName); + if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(pThreadInfo->taos); + return NULL; + } + + sprintf(g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + "taosdemo-subscribe-%"PRIu64"-%d", + pThreadInfo->querySeq, + pThreadInfo->threadID); + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] != '\0') { + sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - } - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( - SPECIFIED_CLASS, pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; - } - - // start loop to consume result - - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) - || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < - g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { - - if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { - continue; - } - - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume( - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); - if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { - if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] - != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult( - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], - pThreadInfo); - } - - 
g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; - if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) - && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= - g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { - printf("keepProgress:%d, resub specified query: %"PRIu64"\n", - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, - pThreadInfo->querySeq); - g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; - taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); - g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = - subscribeImpl( - SPECIFIED_CLASS, - pThreadInfo, - g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], - g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], - g_queryInfo.specifiedQueryInfo.subscribeRestart, - g_queryInfo.specifiedQueryInfo.subscribeInterval); - if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { - taos_close(pThreadInfo->taos); - return NULL; - } - } - } - } - taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]); - taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->querySeq], 0); - taos_close(pThreadInfo->taos); - - return NULL; + } + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = subscribeImpl( + SPECIFIED_CLASS, pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeInterval); + if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { + taos_close(pThreadInfo->taos); + return NULL; + } + + // start loop to consume result + + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; + while((g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq] == -1) + || (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] < + g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq])) { + + printf("consumed[%d]: %d, endAfterConsum[%"PRId64"]: %d\n", + pThreadInfo->threadID, + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID], + pThreadInfo->querySeq, + g_queryInfo.specifiedQueryInfo.endAfterConsume[pThreadInfo->querySeq]); + if (ASYNC_MODE == g_queryInfo.specifiedQueryInfo.asyncMode) { + continue; + } + + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID] = taos_consume( + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]); + if (g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]) { + if (g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq][0] + != 0) { + sprintf(pThreadInfo->filePath, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], + pThreadInfo->threadID); + } + fetchResult( + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], + pThreadInfo); + + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; + if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) + && (g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] >= + g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq])) { + printf("keepProgress:%d, resub specified query: %"PRIu64"\n", + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress, + pThreadInfo->querySeq); + g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] = 0; + 
taos_unsubscribe(g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID] = + subscribeImpl( + SPECIFIED_CLASS, + pThreadInfo, + g_queryInfo.specifiedQueryInfo.sql[pThreadInfo->querySeq], + g_queryInfo.specifiedQueryInfo.topic[pThreadInfo->threadID], + g_queryInfo.specifiedQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeInterval); + if (NULL == g_queryInfo.specifiedQueryInfo.tsub[pThreadInfo->threadID]) { + taos_close(pThreadInfo->taos); + return NULL; + } + } + } + } + taos_free_result(g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID]); + taos_close(pThreadInfo->taos); + + return NULL; } static int subscribeTestProcess() { - setupForAnsiEscape(); - printfQueryMeta(); - resetAfterAnsiEscape(); - - prompt(); - - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, - g_queryInfo.port); - if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); - exit(-1); - } - - if (0 != g_queryInfo.superQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, + setupForAnsiEscape(); + printfQueryMeta(); + resetAfterAnsiEscape(); + + prompt(); + + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, g_queryInfo.dbName, - g_queryInfo.superQueryInfo.sTblName, - &g_queryInfo.superQueryInfo.childTblName, - &g_queryInfo.superQueryInfo.childTblCount); - } - - taos_close(taos); // TODO: workaround to use separate taos connection; - - pthread_t *pids = NULL; - threadInfo *infos = NULL; - - pthread_t *pidsOfStable = NULL; - threadInfo *infosOfStable = NULL; - - //==== create threads for query for specified table - if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", - __func__, __LINE__, - g_queryInfo.specifiedQueryInfo.sqlCount); - } else { - if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { - errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", - __func__, __LINE__, - g_queryInfo.specifiedQueryInfo.sqlCount); + g_queryInfo.port); + if (taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); exit(-1); } - pids = malloc( - g_queryInfo.specifiedQueryInfo.sqlCount * - g_queryInfo.specifiedQueryInfo.concurrent * - sizeof(pthread_t)); - infos = malloc( - g_queryInfo.specifiedQueryInfo.sqlCount * - g_queryInfo.specifiedQueryInfo.concurrent * - sizeof(threadInfo)); - if ((NULL == pids) || (NULL == infos)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); - exit(-1); + if (0 != g_queryInfo.superQueryInfo.sqlCount) { + getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.sTblName, + &g_queryInfo.superQueryInfo.childTblName, + &g_queryInfo.superQueryInfo.childTblCount); } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { - uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; - threadInfo *pThreadInfo = infos + seq; - pThreadInfo->threadID = seq; - pThreadInfo->querySeq = i; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo); - } - } - } + taos_close(taos); // TODO: workaround to use separate taos connection; - //==== create threads for super 
table query - if (g_queryInfo.superQueryInfo.sqlCount <= 0) { - debugPrint("%s() LN%d, super table query sqlCount %d.\n", - __func__, __LINE__, - g_queryInfo.superQueryInfo.sqlCount); - } else { - if ((g_queryInfo.superQueryInfo.sqlCount > 0) - && (g_queryInfo.superQueryInfo.threadCnt > 0)) { - pidsOfStable = malloc( - g_queryInfo.superQueryInfo.sqlCount * - g_queryInfo.superQueryInfo.threadCnt * - sizeof(pthread_t)); - infosOfStable = malloc( - g_queryInfo.superQueryInfo.sqlCount * - g_queryInfo.superQueryInfo.threadCnt * - sizeof(threadInfo)); - if ((NULL == pidsOfStable) || (NULL == infosOfStable)) { - errorPrint("%s() LN%d, malloc failed for create threads\n", - __func__, __LINE__); - // taos_close(taos); - exit(-1); - } + pthread_t *pids = NULL; + threadInfo *infos = NULL; - int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; - int threads = g_queryInfo.superQueryInfo.threadCnt; + pthread_t *pidsOfStable = NULL; + threadInfo *infosOfStable = NULL; - int64_t a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; + //==== create threads for query for specified table + if (g_queryInfo.specifiedQueryInfo.sqlCount <= 0) { + debugPrint("%s() LN%d, sepcified query sqlCount %d.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount); + } else { + if (g_queryInfo.specifiedQueryInfo.concurrent <= 0) { + errorPrint("%s() LN%d, sepcified query sqlCount %d.\n", + __func__, __LINE__, + g_queryInfo.specifiedQueryInfo.sqlCount); + exit(-1); } - int64_t b = 0; - if (threads != 0) { - b = ntables % threads; + pids = calloc( + 1, + g_queryInfo.specifiedQueryInfo.sqlCount * + g_queryInfo.specifiedQueryInfo.concurrent * + sizeof(pthread_t)); + infos = calloc( + 1, + g_queryInfo.specifiedQueryInfo.sqlCount * + g_queryInfo.specifiedQueryInfo.concurrent * + sizeof(threadInfo)); + if ((NULL == pids) || (NULL == infos)) { + errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); + exit(-1); } - for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - uint64_t tableFrom = 0; - for (int j = 0; j < threads; j++) { - uint64_t seq = i * threads + j; - threadInfo *pThreadInfo = infosOfStable + seq; + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { + uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; + threadInfo *pThreadInfo = infos + seq; pThreadInfo->threadID = seq; pThreadInfo->querySeq = i; - - pThreadInfo->start_table_from = tableFrom; - pThreadInfo->ntables = jend_table_to = jend_table_to + 1; - pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; - pthread_create(pidsOfStable + seq, - NULL, superSubscribe, pThreadInfo); + pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pthread_create(pids + seq, NULL, specifiedSubscribe, pThreadInfo); } } + } - g_queryInfo.superQueryInfo.threadCnt = threads; + //==== create threads for super table query + if (g_queryInfo.superQueryInfo.sqlCount <= 0) { + debugPrint("%s() LN%d, super table query sqlCount %d.\n", + __func__, __LINE__, + g_queryInfo.superQueryInfo.sqlCount); + } else { + if ((g_queryInfo.superQueryInfo.sqlCount > 0) + && (g_queryInfo.superQueryInfo.threadCnt > 0)) { + pidsOfStable = calloc( + 1, + g_queryInfo.superQueryInfo.sqlCount * + g_queryInfo.superQueryInfo.threadCnt * + sizeof(pthread_t)); + infosOfStable = calloc( + 1, + g_queryInfo.superQueryInfo.sqlCount * + g_queryInfo.superQueryInfo.threadCnt * + 
sizeof(threadInfo)); + if ((NULL == pidsOfStable) || (NULL == infosOfStable)) { + errorPrint("%s() LN%d, malloc failed for create threads\n", + __func__, __LINE__); + // taos_close(taos); + exit(-1); + } + + int64_t ntables = g_queryInfo.superQueryInfo.childTblCount; + int threads = g_queryInfo.superQueryInfo.threadCnt; + + int64_t a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int64_t b = 0; + if (threads != 0) { + b = ntables % threads; + } + + for (uint64_t i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + uint64_t tableFrom = 0; + for (int j = 0; j < threads; j++) { + uint64_t seq = i * threads + j; + threadInfo *pThreadInfo = infosOfStable + seq; + pThreadInfo->threadID = seq; + pThreadInfo->querySeq = i; + + pThreadInfo->start_table_from = tableFrom; + pThreadInfo->ntables = jend_table_to = jend_table_to + 1; + pThreadInfo->taos = NULL; // TODO: workaround to use separate taos connection; + pthread_create(pidsOfStable + seq, + NULL, superSubscribe, pThreadInfo); + } + } - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - for (int j = 0; j < threads; j++) { - uint64_t seq = i * threads + j; - pthread_join(pidsOfStable[seq], NULL); + g_queryInfo.superQueryInfo.threadCnt = threads; + + for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { + for (int j = 0; j < threads; j++) { + uint64_t seq = i * threads + j; + pthread_join(pidsOfStable[seq], NULL); + } } } } - } - for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { - for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { - uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; - pthread_join(pids[seq], NULL); + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + for (int j = 0; j < g_queryInfo.specifiedQueryInfo.concurrent; j++) { + uint64_t seq = i * g_queryInfo.specifiedQueryInfo.concurrent + j; + pthread_join(pids[seq], NULL); + } } - } - tmfree((char*)pids); - tmfree((char*)infos); + tmfree((char*)pids); + tmfree((char*)infos); - tmfree((char*)pidsOfStable); - tmfree((char*)infosOfStable); -// taos_close(taos); - return 0; + tmfree((char*)pidsOfStable); + tmfree((char*)infosOfStable); + // taos_close(taos); + return 0; } static void initOfInsertMeta() { - memset(&g_Dbs, 0, sizeof(SDbs)); + memset(&g_Dbs, 0, sizeof(SDbs)); - // set default values - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - g_Dbs.port = 6030; - tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); - g_Dbs.threadCount = 2; + // set default values + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); + g_Dbs.port = 6030; + tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); + tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); + g_Dbs.threadCount = 2; - g_Dbs.use_metric = g_args.use_metric; + g_Dbs.use_metric = g_args.use_metric; } static void initOfQueryMeta() { - memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); + memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - // set default values - tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - g_queryInfo.port = 6030; - tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); - tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); + // set default values + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); + g_queryInfo.port = 6030; + tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); + tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, 
MAX_PASSWORD_SIZE); } -static void setParaFromArg(){ - if (g_args.host) { - tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); - } else { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); - } +static void setParaFromArg() { + if (g_args.host) { + tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); + } else { + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); + } + + if (g_args.user) { + tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); + } + + if (g_args.password) { + tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); + } - if (g_args.user) { - tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); - } + if (g_args.port) { + g_Dbs.port = g_args.port; + } - if (g_args.password) { - tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); - } + g_Dbs.threadCount = g_args.num_of_threads; + g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - if (g_args.port) { - g_Dbs.port = g_args.port; - } + g_Dbs.dbCount = 1; + g_Dbs.db[0].drop = true; - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; + tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); + g_Dbs.db[0].dbCfg.replica = g_args.replica; + tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", SMALL_BUFF_LEN); - g_Dbs.dbCount = 1; - g_Dbs.db[0].drop = true; + tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); - tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); - g_Dbs.db[0].dbCfg.replica = g_args.replica; - tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); + g_Dbs.use_metric = g_args.use_metric; + g_Dbs.insert_only = g_args.insert_only; - tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); + g_Dbs.do_aggreFunc = true; - g_Dbs.use_metric = g_args.use_metric; - g_Dbs.insert_only = g_args.insert_only; + char dataString[TSDB_MAX_BYTES_PER_ROW]; + char **data_type = g_args.datatype; - g_Dbs.do_aggreFunc = true; + memset(dataString, 0, TSDB_MAX_BYTES_PER_ROW); - char dataString[STRING_LEN]; - char **data_type = g_args.datatype; + if (strcasecmp(data_type[0], "BINARY") == 0 + || strcasecmp(data_type[0], "BOOL") == 0 + || strcasecmp(data_type[0], "NCHAR") == 0 ) { + g_Dbs.do_aggreFunc = false; + } - memset(dataString, 0, STRING_LEN); + if (g_args.use_metric) { + g_Dbs.db[0].superTblCount = 1; + tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN); + g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; + g_Dbs.threadCount = g_args.num_of_threads; + g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; + g_Dbs.asyncMode = g_args.async_mode; - if (strcasecmp(data_type[0], "BINARY") == 0 - || strcasecmp(data_type[0], "BOOL") == 0 - || strcasecmp(data_type[0], "NCHAR") == 0 ) { - g_Dbs.do_aggreFunc = false; - } + g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; + g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; + g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; + g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; + tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, + g_args.tb_prefix, TBNAME_PREFIX_LEN); + tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", SMALL_BUFF_LEN); - if (g_args.use_metric) { - g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; - g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - g_Dbs.asyncMode = g_args.async_mode; - - 
g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; - g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; - g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; - tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, - g_args.tb_prefix, MAX_TB_NAME_SIZE); - tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].iface = g_args.iface; - tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, - "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP; - - g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; - g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; - - g_Dbs.db[0].superTbls[0].columnCount = 0; - for (int i = 0; i < MAX_NUM_DATATYPE; i++) { - if (data_type[i] == NULL) { - break; - } - - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - data_type[i], MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; - g_Dbs.db[0].superTbls[0].columnCount++; - } - - if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { - g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; + if (g_args.iface == INTERFACE_BUT) { + g_Dbs.db[0].superTbls[0].iface = TAOSC_IFACE; + } else { + g_Dbs.db[0].superTbls[0].iface = g_args.iface; + } + tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, + "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].timeStampStep = g_args.timestamp_step; + + g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; + g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; + + g_Dbs.db[0].superTbls[0].columnCount = 0; + for (int i = 0; i < MAX_NUM_COLUMNS; i++) { + if (data_type[i] == NULL) { + break; + } + + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + data_type[i], min(DATATYPE_BUFF_LEN, strlen(data_type[i]) + 1)); + g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; + g_Dbs.db[0].superTbls[0].columnCount++; + } + + if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { + g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; + } else { + for (int i = g_Dbs.db[0].superTbls[0].columnCount; + i < g_args.num_of_CPR; i++) { + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); + g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; + g_Dbs.db[0].superTbls[0].columnCount++; + } + } + + tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, + "INT", min(DATATYPE_BUFF_LEN, strlen("INT") + 1)); + g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; + + tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, + "BINARY", min(DATATYPE_BUFF_LEN, strlen("BINARY") + 1)); + g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; + g_Dbs.db[0].superTbls[0].tagCount = 2; } else { - for (int i = g_Dbs.db[0].superTbls[0].columnCount; - i < g_args.num_of_CPR; i++) { - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - "INT", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; - g_Dbs.db[0].superTbls[0].columnCount++; - } - } - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, - "INT", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, - "BINARY", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; - g_Dbs.db[0].superTbls[0].tagCount = 2; - } else { - g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; - 
g_Dbs.db[0].superTbls[0].tagCount = 0;
- }
+ g_Dbs.threadCountByCreateTbl = g_args.num_of_threads;
+ g_Dbs.db[0].superTbls[0].tagCount = 0;
+ }
 }

 /* Function to do regular expression check */
 static int regexMatch(const char *s, const char *reg, int cflags) {
- regex_t regex;
- char msgbuf[100] = {0};
-
- /* Compile regular expression */
- if (regcomp(&regex, reg, cflags) != 0) {
- printf("Fail to compile regex\n");
- exit(-1);
- }
-
- /* Execute regular expression */
- int reti = regexec(&regex, s, 0, NULL, 0);
- if (!reti) {
- regfree(&regex);
- return 1;
- } else if (reti == REG_NOMATCH) {
- regfree(&regex);
+ regex_t regex;
+ char msgbuf[100] = {0};
+
+ /* Compile regular expression */
+ if (regcomp(&regex, reg, cflags) != 0) {
+ printf("Fail to compile regex\n");
+ exit(-1);
+ }
+
+ /* Execute regular expression */
+ int reti = regexec(&regex, s, 0, NULL, 0);
+ if (!reti) {
+ regfree(&regex);
+ return 1;
+ } else if (reti == REG_NOMATCH) {
+ regfree(&regex);
+ return 0;
+ } else {
+ regerror(reti, &regex, msgbuf, sizeof(msgbuf));
+ printf("Regex match failed: %s\n", msgbuf);
+ regfree(&regex);
+ exit(-1);
+ }
+
+ return 0;
- } else {
- regerror(reti, &regex, msgbuf, sizeof(msgbuf));
- printf("Regex match failed: %s\n", msgbuf);
- regfree(&regex);
- exit(-1);
- }
-
- return 0;
 }

 static int isCommentLine(char *line) {
- if (line == NULL) return 1;
+ if (line == NULL) return 1;

- return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
+ return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
 }

 static void querySqlFile(TAOS* taos, char* sqlFile) {
- FILE *fp = fopen(sqlFile, "r");
- if (fp == NULL) {
- printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
- return;
- }
+ FILE *fp = fopen(sqlFile, "r");
+ if (fp == NULL) {
+ printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
+ return;
+ }

- int read_len = 0;
- char * cmd = calloc(1, MAX_SQL_SIZE);
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
+ int read_len = 0;
+ char * cmd = calloc(1, TSDB_MAX_BYTES_PER_ROW);
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;

- double t = taosGetTimestampMs();
+ double t = taosGetTimestampMs();

- while((read_len = tgetline(&line, &line_len, fp)) != -1) {
- if (read_len >= MAX_SQL_SIZE) continue;
- line[--read_len] = '\0';
+ while((read_len = tgetline(&line, &line_len, fp)) != -1) {
+ if (read_len >= TSDB_MAX_BYTES_PER_ROW) continue;
+ line[--read_len] = '\0';

- if (read_len == 0 || isCommentLine(line)) { // line starts with #
- continue;
- }
+ if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ continue;
+ }

- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
+ }

- memcpy(cmd + cmd_len, line, read_len);
- if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
- errorPrint("%s() LN%d, queryDbExec %s failed!\n",
- __func__, __LINE__, cmd);
- tmfree(cmd);
- tmfree(line);
- tmfclose(fp);
- return;
+ memcpy(cmd + cmd_len, line, read_len);
+ if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) {
+ errorPrint("%s() LN%d, queryDbExec %s failed!\n",
+ __func__, __LINE__, cmd);
+ tmfree(cmd);
+ tmfree(line);
+ tmfclose(fp);
+ return;
+ }
+ memset(cmd, 0, TSDB_MAX_BYTES_PER_ROW);
+ cmd_len = 0;
 }
- memset(cmd, 0, MAX_SQL_SIZE);
- cmd_len = 0;
- }
- t = taosGetTimestampMs() - t;
- printf("run %s took %.6f second(s)\n\n", sqlFile, t);
+ t 
= taosGetTimestampMs() - t; + printf("run %s took %.6f second(s)\n\n", sqlFile, t); - tmfree(cmd); - tmfree(line); - tmfclose(fp); - return; + tmfree(cmd); + tmfree(line); + tmfclose(fp); + return; } static void testMetaFile() { if (INSERT_TEST == g_args.test_mode) { - if (g_Dbs.cfgDir[0]) - taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); + if (g_Dbs.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); - insertTestProcess(); + insertTestProcess(); } else if (QUERY_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + if (g_queryInfo.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - queryTestProcess(); + queryTestProcess(); } else if (SUBSCRIBE_TEST == g_args.test_mode) { - if (g_queryInfo.cfgDir[0]) - taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + if (g_queryInfo.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); - subscribeTestProcess(); + subscribeTestProcess(); } else { - ; + ; } } static void queryResult() { - // query data - - pthread_t read_id; - threadInfo *pThreadInfo = malloc(sizeof(threadInfo)); - assert(pThreadInfo); - pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 - pThreadInfo->start_table_from = 0; - - //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc; - if (g_args.use_metric) { - pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; - pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; - pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - tstrncpy(pThreadInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); - } else { - pThreadInfo->ntables = g_args.num_of_tables; - pThreadInfo->end_table_to = g_args.num_of_tables -1; - tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); - } - - pThreadInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - if (pThreadInfo->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", - taos_errstr(NULL)); + // query data + + pthread_t read_id; + threadInfo *pThreadInfo = calloc(1, sizeof(threadInfo)); + assert(pThreadInfo); + pThreadInfo->start_time = 1500000000000; // 2017-07-14 10:40:00.000 + pThreadInfo->start_table_from = 0; + + //pThreadInfo->do_aggreFunc = g_Dbs.do_aggreFunc; + if (g_args.use_metric) { + pThreadInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; + pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; + pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; + tstrncpy(pThreadInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix, TBNAME_PREFIX_LEN); + } else { + pThreadInfo->ntables = g_args.num_of_tables; + pThreadInfo->end_table_to = g_args.num_of_tables -1; + tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); + } + + pThreadInfo->taos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + if (pThreadInfo->taos == NULL) { + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); + free(pThreadInfo); + exit(-1); + } + + tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, MAX_FILE_NAME_LEN); + + if (!g_Dbs.use_metric) { + pthread_create(&read_id, NULL, readTable, pThreadInfo); + } else { + pthread_create(&read_id, NULL, readMetric, pThreadInfo); + } + pthread_join(read_id, NULL); + taos_close(pThreadInfo->taos); free(pThreadInfo); - exit(-1); - } - - tstrncpy(pThreadInfo->filePath, g_Dbs.resultFile, 
MAX_FILE_NAME_LEN); - - if (!g_Dbs.use_metric) { - pthread_create(&read_id, NULL, readTable, pThreadInfo); - } else { - pthread_create(&read_id, NULL, readMetric, pThreadInfo); - } - pthread_join(read_id, NULL); - taos_close(pThreadInfo->taos); - free(pThreadInfo); } static void testCmdLine() { - if (strlen(configDir)) { - wordexp_t full_path; - if (wordexp(configDir, &full_path, 0) != 0) { - errorPrint( "Invalid path %s\n", configDir); - return; + if (strlen(configDir)) { + wordexp_t full_path; + if (wordexp(configDir, &full_path, 0) != 0) { + errorPrint( "Invalid path %s\n", configDir); + return; + } + taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); + wordfree(&full_path); } - taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); - wordfree(&full_path); - } - g_args.test_mode = INSERT_TEST; - insertTestProcess(); + g_args.test_mode = INSERT_TEST; + insertTestProcess(); - if (false == g_Dbs.insert_only) - queryResult(); + if (false == g_Dbs.insert_only) + queryResult(); } int main(int argc, char *argv[]) { - parse_args(argc, argv, &g_args); - - debugPrint("meta file: %s\n", g_args.metaFile); + parse_args(argc, argv, &g_args); - if (g_args.metaFile) { - initOfInsertMeta(); - initOfQueryMeta(); + debugPrint("meta file: %s\n", g_args.metaFile); - if (false == getInfoFromJsonFile(g_args.metaFile)) { - printf("Failed to read %s\n", g_args.metaFile); - return 1; - } + if (g_args.metaFile) { + initOfInsertMeta(); + initOfQueryMeta(); - testMetaFile(); - } else { - memset(&g_Dbs, 0, sizeof(SDbs)); - setParaFromArg(); - - if (NULL != g_args.sqlFile) { - TAOS* qtaos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, - g_Dbs.port); - querySqlFile(qtaos, g_args.sqlFile); - taos_close(qtaos); + if (false == getInfoFromJsonFile(g_args.metaFile)) { + printf("Failed to read %s\n", g_args.metaFile); + return 1; + } + testMetaFile(); } else { - testCmdLine(); - } + memset(&g_Dbs, 0, sizeof(SDbs)); + setParaFromArg(); + + if (NULL != g_args.sqlFile) { + TAOS* qtaos = taos_connect( + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, + g_Dbs.port); + querySqlFile(qtaos, g_args.sqlFile); + taos_close(qtaos); + + } else { + testCmdLine(); + } - if (g_dupstr) - free(g_dupstr); - } + if (g_dupstr) + free(g_dupstr); + } - return 0; + return 0; } diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index 58897b89e95743c802755c0476f3b2843a244a59..51f4748eab462c8e883e83cd5923f38dd7fb9b5a 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 15db83297c491c43ded56e23c9c6033600b2d2f7..7b2102321685409ec0a9b7cbe8a6667bd6250b81 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -25,14 +25,22 @@ #include "tsclient.h" #include "tsdb.h" #include "tutil.h" +#include -#define COMMAND_SIZE 65536 +#define TSDB_SUPPORT_NANOSECOND 1 + +#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255 +#define COMMAND_SIZE 65536 +#define MAX_RECORDS_PER_REQ 32766 //#define DEFAULT_DUMP_FILE "taosdump.sql" -int converStringToReadable(char *str, int size, char *buf, int bufsize); -int convertNCharToReadable(char *str, int size, char *buf, int bufsize); -void taosDumpCharset(FILE *fp); -void taosLoadFileCharset(FILE *fp, char *fcharset); +// 
for strncpy buffer overflow +#define min(a, b) (((a) < (b)) ? (a) : (b)) + +static int converStringToReadable(char *str, int size, char *buf, int bufsize); +static int convertNCharToReadable(char *str, int size, char *buf, int bufsize); +static void taosDumpCharset(FILE *fp); +static void taosLoadFileCharset(FILE *fp, char *fcharset); typedef struct { short bytes; @@ -52,125 +60,128 @@ typedef struct { fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) #define errorPrint(fmt, ...) \ - do { fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); } while(0) + do { fprintf(stderr, "\033[31m"); fprintf(stderr, "ERROR: "fmt, __VA_ARGS__); fprintf(stderr, "\033[0m"); } while(0) // -------------------------- SHOW DATABASE INTERFACE----------------------- enum _show_db_index { - TSDB_SHOW_DB_NAME_INDEX, - TSDB_SHOW_DB_CREATED_TIME_INDEX, - TSDB_SHOW_DB_NTABLES_INDEX, - TSDB_SHOW_DB_VGROUPS_INDEX, - TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, - TSDB_SHOW_DB_DAYS_INDEX, - TSDB_SHOW_DB_KEEP_INDEX, - TSDB_SHOW_DB_CACHE_INDEX, - TSDB_SHOW_DB_BLOCKS_INDEX, - TSDB_SHOW_DB_MINROWS_INDEX, - TSDB_SHOW_DB_MAXROWS_INDEX, - TSDB_SHOW_DB_WALLEVEL_INDEX, - TSDB_SHOW_DB_FSYNC_INDEX, - TSDB_SHOW_DB_COMP_INDEX, - TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, - TSDB_SHOW_DB_UPDATE_INDEX, - TSDB_SHOW_DB_STATUS_INDEX, - TSDB_MAX_SHOW_DB + TSDB_SHOW_DB_NAME_INDEX, + TSDB_SHOW_DB_CREATED_TIME_INDEX, + TSDB_SHOW_DB_NTABLES_INDEX, + TSDB_SHOW_DB_VGROUPS_INDEX, + TSDB_SHOW_DB_REPLICA_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_DAYS_INDEX, + TSDB_SHOW_DB_KEEP_INDEX, + TSDB_SHOW_DB_CACHE_INDEX, + TSDB_SHOW_DB_BLOCKS_INDEX, + TSDB_SHOW_DB_MINROWS_INDEX, + TSDB_SHOW_DB_MAXROWS_INDEX, + TSDB_SHOW_DB_WALLEVEL_INDEX, + TSDB_SHOW_DB_FSYNC_INDEX, + TSDB_SHOW_DB_COMP_INDEX, + TSDB_SHOW_DB_CACHELAST_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_UPDATE_INDEX, + TSDB_SHOW_DB_STATUS_INDEX, + TSDB_MAX_SHOW_DB }; // -----------------------------------------SHOW TABLES CONFIGURE ------------------------------------- enum _show_tables_index { - TSDB_SHOW_TABLES_NAME_INDEX, - TSDB_SHOW_TABLES_CREATED_TIME_INDEX, - TSDB_SHOW_TABLES_COLUMNS_INDEX, - TSDB_SHOW_TABLES_METRIC_INDEX, - TSDB_SHOW_TABLES_UID_INDEX, - TSDB_SHOW_TABLES_TID_INDEX, - TSDB_SHOW_TABLES_VGID_INDEX, - TSDB_MAX_SHOW_TABLES + TSDB_SHOW_TABLES_NAME_INDEX, + TSDB_SHOW_TABLES_CREATED_TIME_INDEX, + TSDB_SHOW_TABLES_COLUMNS_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_SHOW_TABLES_UID_INDEX, + TSDB_SHOW_TABLES_TID_INDEX, + TSDB_SHOW_TABLES_VGID_INDEX, + TSDB_MAX_SHOW_TABLES }; // ---------------------------------- DESCRIBE METRIC CONFIGURE ------------------------------ enum _describe_table_index { - TSDB_DESCRIBE_METRIC_FIELD_INDEX, - TSDB_DESCRIBE_METRIC_TYPE_INDEX, - TSDB_DESCRIBE_METRIC_LENGTH_INDEX, - TSDB_DESCRIBE_METRIC_NOTE_INDEX, - TSDB_MAX_DESCRIBE_METRIC + TSDB_DESCRIBE_METRIC_FIELD_INDEX, + TSDB_DESCRIBE_METRIC_TYPE_INDEX, + TSDB_DESCRIBE_METRIC_LENGTH_INDEX, + TSDB_DESCRIBE_METRIC_NOTE_INDEX, + TSDB_MAX_DESCRIBE_METRIC }; #define COL_NOTE_LEN 128 typedef struct { - char field[TSDB_COL_NAME_LEN + 1]; - char type[16]; - int length; - char note[COL_NOTE_LEN]; + char field[TSDB_COL_NAME_LEN + 1]; + char type[16]; + int length; + char note[COL_NOTE_LEN]; } SColDes; typedef struct { - char name[TSDB_TABLE_NAME_LEN]; - SColDes cols[]; + char name[TSDB_TABLE_NAME_LEN]; + SColDes cols[]; } STableDef; extern char version[]; +#define DB_PRECISION_LEN 8 +#define DB_STATUS_LEN 16 + typedef struct { - char name[TSDB_DB_NAME_LEN]; - char 
create_time[32]; - int32_t ntables; - int32_t vgroups; - int16_t replica; - int16_t quorum; - int16_t days; - char keeplist[32]; - //int16_t daysToKeep; - //int16_t daysToKeep1; - //int16_t daysToKeep2; - int32_t cache; //MB - int32_t blocks; - int32_t minrows; - int32_t maxrows; - int8_t wallevel; - int32_t fsync; - int8_t comp; - int8_t cachelast; - char precision[8]; // time resolution - int8_t update; - char status[16]; + char name[TSDB_DB_NAME_LEN]; + char create_time[32]; + int32_t ntables; + int32_t vgroups; + int16_t replica; + int16_t quorum; + int16_t days; + char keeplist[32]; + //int16_t daysToKeep; + //int16_t daysToKeep1; + //int16_t daysToKeep2; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + int8_t cachelast; + char precision[DB_PRECISION_LEN]; // time resolution + int8_t update; + char status[DB_STATUS_LEN]; } SDbInfo; typedef struct { - char name[TSDB_TABLE_NAME_LEN]; - char metric[TSDB_TABLE_NAME_LEN]; + char name[TSDB_TABLE_NAME_LEN]; + char metric[TSDB_TABLE_NAME_LEN]; } STableRecord; typedef struct { - bool isMetric; - STableRecord tableRecord; + bool isMetric; + STableRecord tableRecord; } STableRecordInfo; typedef struct { - pthread_t threadID; - int32_t threadIndex; - int32_t totalThreads; - char dbName[TSDB_DB_NAME_LEN]; - void *taosCon; - int64_t rowsOfDumpOut; - int64_t tablesOfDumpOut; + pthread_t threadID; + int32_t threadIndex; + int32_t totalThreads; + char dbName[TSDB_DB_NAME_LEN]; + void *taosCon; + int64_t rowsOfDumpOut; + int64_t tablesOfDumpOut; } SThreadParaObj; typedef struct { - int64_t totalRowsOfDumpOut; - int64_t totalChildTblsOfDumpOut; - int32_t totalSuperTblsOfDumpOut; - int32_t totalDatabasesOfDumpOut; + int64_t totalRowsOfDumpOut; + int64_t totalChildTblsOfDumpOut; + int32_t totalSuperTblsOfDumpOut; + int32_t totalDatabasesOfDumpOut; } resultStatistics; -static int64_t totalDumpOutRows = 0; +static int64_t g_totalDumpOutRows = 0; -SDbInfo **dbInfos = NULL; +SDbInfo **g_dbInfos = NULL; const char *argp_program_version = version; const char *argp_program_bug_address = ""; @@ -191,1469 +202,1669 @@ static char args_doc[] = "dbname [tbname ...]\n--databases dbname ...\n--all-dat /* The options we understand. */ static struct argp_option options[] = { - // connection option - {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, - {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, - #ifdef _TD_POWER_ - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, - #else - {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, - #endif - {"port", 'P', "PORT", 0, "Port to connect", 0}, - {"cversion", 'v', "CVERION", 0, "client version", 0}, - {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, - // input/output file - {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, - {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, - {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, - #ifdef _TD_POWER_ - {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, - #else - {"config", 'c', "CONFIG_DIR", 0, "Configure directory. 
Default is /etc/taos/taos.cfg.", 1}, - #endif - {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, - // dump unit options - {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, - {"databases", 'D', 0, 0, "Dump assigned databases", 2}, - // dump format options - {"schemaonly", 's', 0, 0, "Only dump schema.", 3}, - {"without-property", 'N', 0, 0, "Dump schema without properties.", 3}, - {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, - {"end-time", 'E', "END_TIME", 0, "End time to dump. Either Epoch or ISO8601/RFC3339 format is acceptable. Epoch precision millisecond. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 3}, - {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Default is 1.", 3}, - {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, - {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, - {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, - {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 3}, - {"debug", 'g', 0, 0, "Print debug info.", 1}, - {"verbose", 'v', 0, 0, "Print verbose debug info.", 1}, - {0}}; + // connection option + {"host", 'h', "HOST", 0, "Server host dumping data from. Default is localhost.", 0}, + {"user", 'u', "USER", 0, "User name used to connect to server. Default is root.", 0}, +#ifdef _TD_POWER_ + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is powerdb.", 0}, +#else + {"password", 'p', "PASSWORD", 0, "User password to connect to server. Default is taosdata.", 0}, +#endif + {"port", 'P', "PORT", 0, "Port to connect", 0}, + {"cversion", 'v', "CVERION", 0, "client version", 0}, + {"mysqlFlag", 'q', "MYSQLFLAG", 0, "mysqlFlag, Default is 0", 0}, + // input/output file + {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, + {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, +#ifdef _TD_POWER_ + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, +#else + {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/taos/taos.cfg.", 1}, +#endif + {"encode", 'e', "ENCODE", 0, "Input file encoding.", 1}, + // dump unit options + {"all-databases", 'A', 0, 0, "Dump all databases.", 2}, + {"databases", 'D', 0, 0, "Dump assigned databases", 2}, + {"allow-sys", 'a', 0, 0, "Allow to dump sys database", 2}, + // dump format options + {"schemaonly", 's', 0, 0, "Only dump schema.", 2}, + {"without-property", 'N', 0, 0, "Dump schema without properties.", 2}, + {"avro", 'V', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, + {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, + {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. 
ISO8601 format example: 2017-10-01T18:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, +#if TSDB_SUPPORT_NANOSECOND == 1 + {"precision", 'C', "PRECISION", 0, "Specify precision for converting human-readable time to epoch. Valid value is one of ms, us, and ns. Default is ms.", 6}, +#else + {"precision", 'C', "PRECISION", 0, "Use specified precision to convert human-readable time. Valid value is one of ms and us. Default is ms.", 6}, +#endif + {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, + {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, + {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, + {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, + {"debug", 'g', 0, 0, "Print debug info.", 8}, + {"verbose", 'b', 0, 0, "Print verbose debug info.", 9}, + {"performanceprint", 'm', 0, 0, "Print performance debug info.", 10}, + {0} +}; /* Used by main to communicate with parse_opt. */ typedef struct arguments { - // connection option - char *host; - char *user; - char *password; - uint16_t port; - char cversion[12]; - uint16_t mysqlFlag; - // output file - char outpath[TSDB_FILENAME_LEN+1]; - char inpath[TSDB_FILENAME_LEN+1]; - // result file - char *resultFile; - char *encode; - // dump unit option - bool all_databases; - bool databases; - // dump format option - bool schemaonly; - bool with_property; - int64_t start_time; - int64_t end_time; - int32_t data_batch; - int32_t max_sql_len; - int32_t table_batch; // num of table which will be dump into one output file. - bool allow_sys; - // other options - int32_t thread_num; - int abort; - char **arg_list; - int arg_list_len; - bool isDumpIn; - bool debug_print; - bool verbose_print; - bool performance_print; -} SArguments; - -/* Parse a single option. */ -static error_t parse_opt(int key, char *arg, struct argp_state *state) { - /* Get the input argument from argp_parse, which we - know is a pointer to our arguments structure. 
*/ - struct arguments *arguments = state->input; - wordexp_t full_path; - - switch (key) { // connection option - case 'a': - arguments->allow_sys = true; - break; - case 'h': - arguments->host = arg; - break; - case 'u': - arguments->user = arg; - break; - case 'p': - arguments->password = arg; - break; - case 'P': - arguments->port = atoi(arg); - break; - case 'q': - arguments->mysqlFlag = atoi(arg); - break; - case 'v': - if (wordexp(arg, &full_path, 0) != 0) { - fprintf(stderr, "Invalid client vesion %s\n", arg); - return -1; - } - tstrncpy(arguments->cversion, full_path.we_wordv[0], 11); - wordfree(&full_path); - break; - // output file path - case 'o': - if (wordexp(arg, &full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", arg); - return -1; - } - tstrncpy(arguments->outpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); - wordfree(&full_path); - break; - case 'g': - arguments->debug_print = true; - break; - case 'i': - arguments->isDumpIn = true; - if (wordexp(arg, &full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", arg); - return -1; - } - tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); - wordfree(&full_path); - break; - case 'r': - arguments->resultFile = arg; - break; - case 'c': - if (wordexp(arg, &full_path, 0) != 0) { - fprintf(stderr, "Invalid path %s\n", arg); - return -1; - } - tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); - wordfree(&full_path); - break; - case 'e': - arguments->encode = arg; - break; + char *host; + char *user; + char *password; + uint16_t port; + char cversion[12]; + uint16_t mysqlFlag; + // output file + char outpath[MAX_FILE_NAME_LEN]; + char inpath[MAX_FILE_NAME_LEN]; + // result file + char *resultFile; + char *encode; // dump unit option - case 'A': - arguments->all_databases = true; - break; - case 'D': - arguments->databases = true; - break; + bool all_databases; + bool databases; // dump format option - case 's': - arguments->schemaonly = true; - break; - case 'N': - arguments->with_property = false; - break; - case 'S': - // parse time here. - arguments->start_time = atol(arg); - break; - case 'E': - arguments->end_time = atol(arg); - break; - case 'B': - arguments->data_batch = atoi(arg); - if (arguments->data_batch >= INT16_MAX) { - arguments->data_batch = INT16_MAX - 1; - } - break; - case 'L': - { - int32_t len = atoi(arg); - if (len > TSDB_MAX_ALLOWED_SQL_LEN) { - len = TSDB_MAX_ALLOWED_SQL_LEN; - } else if (len < TSDB_MAX_SQL_LEN) { - len = TSDB_MAX_SQL_LEN; - } - arguments->max_sql_len = len; - break; - } - case 't': - arguments->table_batch = atoi(arg); - break; - case 'T': - arguments->thread_num = atoi(arg); - break; - case OPT_ABORT: - arguments->abort = 1; - break; - case ARGP_KEY_ARG: - arguments->arg_list = &state->argv[state->next - 1]; - arguments->arg_list_len = state->argc - state->next + 1; - state->next = state->argc; - break; - - default: - return ARGP_ERR_UNKNOWN; - } - return 0; -} + bool schemaonly; + bool with_property; + bool avro; + int64_t start_time; + int64_t end_time; + char precision[8]; + int32_t data_batch; + int32_t max_sql_len; + int32_t table_batch; // num of table which will be dump into one output file. + bool allow_sys; + // other options + int32_t thread_num; + int abort; + char **arg_list; + int arg_list_len; + bool isDumpIn; + bool debug_print; + bool verbose_print; + bool performance_print; +} SArguments; /* Our argp parser. 
*/ +static error_t parse_opt(int key, char *arg, struct argp_state *state); + static struct argp argp = {options, parse_opt, args_doc, doc}; static resultStatistics g_resultStatistics = {0}; static FILE *g_fpOfResult = NULL; static int g_numOfCores = 1; -static int taosDumpOut(struct arguments *arguments); -static int taosDumpIn(struct arguments *arguments); -static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); -static int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); -static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); -static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); -static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); -static int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); -static int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); +static int taosDumpOut(); +static int taosDumpIn(); +static void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, + FILE *fp); +static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon); +static int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, + char* dbName); +static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, + FILE *fp, char* dbName); +static void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, + int numOfCols, FILE *fp, char* dbName); +static int32_t taosDumpTable(char *table, char *metric, + FILE *fp, TAOS* taosCon, char* dbName); +static int taosDumpTableData(FILE *fp, char *tbName, + TAOS* taosCon, char* dbName, + char *jsonAvroSchema); static int taosCheckParam(struct arguments *arguments); static void taosFreeDbInfos(); -static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName); +static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName); struct arguments g_args = { - // connection option - NULL, - "root", - #ifdef _TD_POWER_ - "powerdb", - #else - "taosdata", - #endif - 0, - "", - 0, - // outpath and inpath - "", - "", - "./dump_result.txt", - NULL, - // dump unit option - false, - false, - // dump format option - false, // schemeonly - true, // with_property - 0, - INT64_MAX, - 1, - TSDB_MAX_SQL_LEN, - 1, - false, - // other options - 5, - 0, - NULL, - 0, - false, - false, // debug_print - false, // verbose_print - false // performance_print + // connection option + NULL, + "root", +#ifdef _TD_POWER_ + "powerdb", +#else + "taosdata", +#endif + 0, + "", + 0, + // outpath and inpath + "", + "", + "./dump_result.txt", + NULL, + // dump unit option + false, + false, + // dump format option + false, // schemeonly + true, // with_property + false, // avro format + -INT64_MAX, // start_time + INT64_MAX, // end_time + "ms", // precision + 1, // data_batch + TSDB_MAX_SQL_LEN, // max_sql_len + 1, // table_batch + false, // allow_sys + // other options + 5, // thread_num + 0, // abort + NULL, // arg_list + 0, // arg_list_len + false, // isDumpIn + false, // debug_print + false, // verbose_print + false // performance_print }; +/* Parse a single option. */ +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + /* Get the input argument from argp_parse, which we + know is a pointer to our arguments structure. 
*/ + wordexp_t full_path; + + switch (key) { + // connection option + case 'a': + g_args.allow_sys = true; + break; + case 'h': + g_args.host = arg; + break; + case 'u': + g_args.user = arg; + break; + case 'p': + g_args.password = arg; + break; + case 'P': + g_args.port = atoi(arg); + break; + case 'q': + g_args.mysqlFlag = atoi(arg); + break; + case 'v': + if (wordexp(arg, &full_path, 0) != 0) { + errorPrint("Invalid client vesion %s\n", arg); + return -1; + } + tstrncpy(g_args.cversion, full_path.we_wordv[0], 11); + wordfree(&full_path); + break; + // output file path + case 'o': + if (wordexp(arg, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", arg); + return -1; + } + tstrncpy(g_args.outpath, full_path.we_wordv[0], + MAX_FILE_NAME_LEN); + wordfree(&full_path); + break; + case 'g': + g_args.debug_print = true; + break; + case 'i': + g_args.isDumpIn = true; + if (wordexp(arg, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", arg); + return -1; + } + tstrncpy(g_args.inpath, full_path.we_wordv[0], + MAX_FILE_NAME_LEN); + wordfree(&full_path); + break; + case 'r': + g_args.resultFile = arg; + break; + case 'c': + if (wordexp(arg, &full_path, 0) != 0) { + errorPrint("Invalid path %s\n", arg); + return -1; + } + tstrncpy(configDir, full_path.we_wordv[0], MAX_FILE_NAME_LEN); + wordfree(&full_path); + break; + case 'e': + g_args.encode = arg; + break; + // dump unit option + case 'A': + g_args.all_databases = true; + break; + case 'D': + g_args.databases = true; + break; + // dump format option + case 's': + g_args.schemaonly = true; + break; + case 'N': + g_args.with_property = false; + break; + case 'V': + g_args.avro = true; + break; + case 'S': + // parse time here. + g_args.start_time = atol(arg); + break; + case 'E': + g_args.end_time = atol(arg); + break; + case 'C': + break; + case 'B': + g_args.data_batch = atoi(arg); + if (g_args.data_batch > MAX_RECORDS_PER_REQ) { + g_args.data_batch = MAX_RECORDS_PER_REQ; + } + break; + case 'L': + { + int32_t len = atoi(arg); + if (len > TSDB_MAX_ALLOWED_SQL_LEN) { + len = TSDB_MAX_ALLOWED_SQL_LEN; + } else if (len < TSDB_MAX_SQL_LEN) { + len = TSDB_MAX_SQL_LEN; + } + g_args.max_sql_len = len; + break; + } + case 't': + g_args.table_batch = atoi(arg); + break; + case 'T': + g_args.thread_num = atoi(arg); + break; + case OPT_ABORT: + g_args.abort = 1; + break; + case ARGP_KEY_ARG: + g_args.arg_list = &state->argv[state->next - 1]; + g_args.arg_list_len = state->argc - state->next + 1; + state->next = state->argc; + break; + + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} + static int queryDbImpl(TAOS *taos, char *command) { - int i; - TAOS_RES *res = NULL; - int32_t code = -1; + int i; + TAOS_RES *res = NULL; + int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != res) { + taos_free_result(res); + res = NULL; + } - for (i = 0; i < 5; i++) { - if (NULL != res) { - taos_free_result(res); - res = NULL; + res = taos_query(taos, command); + code = taos_errno(res); + if (0 == code) { + break; + } } - res = taos_query(taos, command); - code = taos_errno(res); - if (0 == code) { - break; + if (code != 0) { + errorPrint("Failed to run <%s>, reason: %s\n", command, taos_errstr(res)); + taos_free_result(res); + //taos_close(taos); + return -1; } - } - if (code != 0) { - fprintf(stderr, "Failed to run <%s>, reason: %s\n", command, taos_errstr(res)); taos_free_result(res); - //taos_close(taos); - return -1; - } + return 0; +} - taos_free_result(res); - return 0; +static void parse_precision_first( + int argc, char *argv[], 
SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if (strcmp(argv[i], "-C") == 0) { + if (NULL == argv[i+1]) { + errorPrint("%s need a valid value following!\n", argv[i]); + exit(-1); + } + char *tmp = strdup(argv[i+1]); + if (tmp == NULL) { + errorPrint("%s() LN%d, strdup() cannot allocate memory\n", + __func__, __LINE__); + exit(-1); + } + if ((0 != strncasecmp(tmp, "ms", strlen("ms"))) + && (0 != strncasecmp(tmp, "us", strlen("us"))) +#if TSDB_SUPPORT_NANOSECOND == 1 + && (0 != strncasecmp(tmp, "ns", strlen("ns"))) +#endif + ) { + // + errorPrint("input precision: %s is invalid value\n", tmp); + free(tmp); + exit(-1); + } + tstrncpy(g_args.precision, tmp, + min(DB_PRECISION_LEN, strlen(tmp) + 1)); + free(tmp); + } + } } -static void parse_args(int argc, char *argv[], SArguments *arguments) { - for (int i = 1; i < argc; i++) { - if ((strcmp(argv[i], "-S") == 0) - || (strcmp(argv[i], "-E") == 0)) { - if (argv[i+1]) { - char *tmp = strdup(argv[++i]); - - if (tmp) { - int64_t tmpEpoch; - if (strchr(tmp, ':') && strchr(tmp, '-')) { - if (TSDB_CODE_SUCCESS != taosParseTime( - tmp, &tmpEpoch, strlen(tmp), TSDB_TIME_PRECISION_MILLI, 0)) { - fprintf(stderr, "Input end time error!\n"); - free(tmp); - return; +static void parse_timestamp( + int argc, char *argv[], SArguments *arguments) { + for (int i = 1; i < argc; i++) { + if ((strcmp(argv[i], "-S") == 0) + || (strcmp(argv[i], "-E") == 0)) { + if (NULL == argv[i+1]) { + errorPrint("%s need a valid value following!\n", argv[i]); + exit(-1); + } + char *tmp = strdup(argv[i+1]); + if (NULL == tmp) { + errorPrint("%s() LN%d, strdup() cannot allocate memory\n", + __func__, __LINE__); + exit(-1); } - } else { - tmpEpoch = atoll(tmp); - } - sprintf(argv[i], "%"PRId64"", tmpEpoch); - debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", - __func__, __LINE__, tmp, i, argv[i]); + int64_t tmpEpoch; + if (strchr(tmp, ':') && strchr(tmp, '-')) { + int32_t timePrec; + if (0 == strncasecmp(arguments->precision, + "ms", strlen("ms"))) { + timePrec = TSDB_TIME_PRECISION_MILLI; + } else if (0 == strncasecmp(arguments->precision, + "us", strlen("us"))) { + timePrec = TSDB_TIME_PRECISION_MICRO; +#if TSDB_SUPPORT_NANOSECOND == 1 + } else if (0 == strncasecmp(arguments->precision, + "ns", strlen("ns"))) { + timePrec = TSDB_TIME_PRECISION_NANO; +#endif + } else { + errorPrint("Invalid time precision: %s", + arguments->precision); + free(tmp); + return; + } + + if (TSDB_CODE_SUCCESS != taosParseTime( + tmp, &tmpEpoch, strlen(tmp), + timePrec, 0)) { + errorPrint("Input %s, end time error!\n", tmp); + free(tmp); + return; + } + } else { + tstrncpy(arguments->precision, "n/a", strlen("n/a") + 1); + tmpEpoch = atoll(tmp); + } - free(tmp); - } else { - errorPrint("%s() LN%d, strdup() cannot allocate memory\n", __func__, __LINE__); - exit(-1); + sprintf(argv[i+1], "%"PRId64"", tmpEpoch); + debugPrint("%s() LN%d, tmp is: %s, argv[%d]: %s\n", + __func__, __LINE__, tmp, i, argv[i]); + free(tmp); } - } else { - errorPrint("%s need a valid value following!\n", argv[i]); - exit(-1); - } - } else if (strcmp(argv[i], "-g") == 0) { - arguments->debug_print = true; } - } } int main(int argc, char *argv[]) { - /* Parse our arguments; every option seen by parse_opt will be - reflected in arguments. */ - if (argc > 2) - parse_args(argc, argv, &g_args); + int ret = 0; + /* Parse our arguments; every option seen by parse_opt will be + reflected in arguments. 
*/ + if (argc > 2) { + parse_precision_first(argc, argv, &g_args); + parse_timestamp(argc, argv, &g_args); + } - argp_parse(&argp, argc, argv, 0, 0, &g_args); + argp_parse(&argp, argc, argv, 0, 0, &g_args); - if (g_args.abort) { - #ifndef _ALPINE - error(10, 0, "ABORTED"); - #else - abort(); - #endif - } + if (g_args.abort) { +#ifndef _ALPINE + error(10, 0, "ABORTED"); +#else + abort(); +#endif + } - printf("====== arguments config ======\n"); - { - printf("host: %s\n", g_args.host); - printf("user: %s\n", g_args.user); - printf("password: %s\n", g_args.password); - printf("port: %u\n", g_args.port); - printf("cversion: %s\n", g_args.cversion); - printf("mysqlFlag: %d\n", g_args.mysqlFlag); - printf("outpath: %s\n", g_args.outpath); - printf("inpath: %s\n", g_args.inpath); - printf("resultFile: %s\n", g_args.resultFile); - printf("encode: %s\n", g_args.encode); - printf("all_databases: %d\n", g_args.all_databases); - printf("databases: %d\n", g_args.databases); - printf("schemaonly: %d\n", g_args.schemaonly); - printf("with_property: %d\n", g_args.with_property); - printf("start_time: %" PRId64 "\n", g_args.start_time); - printf("end_time: %" PRId64 "\n", g_args.end_time); - printf("data_batch: %d\n", g_args.data_batch); - printf("max_sql_len: %d\n", g_args.max_sql_len); - printf("table_batch: %d\n", g_args.table_batch); - printf("thread_num: %d\n", g_args.thread_num); - printf("allow_sys: %d\n", g_args.allow_sys); - printf("abort: %d\n", g_args.abort); - printf("isDumpIn: %d\n", g_args.isDumpIn); - printf("arg_list_len: %d\n", g_args.arg_list_len); - printf("debug_print: %d\n", g_args.debug_print); - - for (int32_t i = 0; i < g_args.arg_list_len; i++) { - printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]); + printf("====== arguments config ======\n"); + { + printf("host: %s\n", g_args.host); + printf("user: %s\n", g_args.user); + printf("password: %s\n", g_args.password); + printf("port: %u\n", g_args.port); + printf("cversion: %s\n", g_args.cversion); + printf("mysqlFlag: %d\n", g_args.mysqlFlag); + printf("outpath: %s\n", g_args.outpath); + printf("inpath: %s\n", g_args.inpath); + printf("resultFile: %s\n", g_args.resultFile); + printf("encode: %s\n", g_args.encode); + printf("all_databases: %s\n", g_args.all_databases?"true":"false"); + printf("databases: %d\n", g_args.databases); + printf("schemaonly: %s\n", g_args.schemaonly?"true":"false"); + printf("with_property: %s\n", g_args.with_property?"true":"false"); + printf("avro format: %s\n", g_args.avro?"true":"false"); + printf("start_time: %" PRId64 "\n", g_args.start_time); + printf("end_time: %" PRId64 "\n", g_args.end_time); + printf("precision: %s\n", g_args.precision); + printf("data_batch: %d\n", g_args.data_batch); + printf("max_sql_len: %d\n", g_args.max_sql_len); + printf("table_batch: %d\n", g_args.table_batch); + printf("thread_num: %d\n", g_args.thread_num); + printf("allow_sys: %d\n", g_args.allow_sys); + printf("abort: %d\n", g_args.abort); + printf("isDumpIn: %d\n", g_args.isDumpIn); + printf("arg_list_len: %d\n", g_args.arg_list_len); + printf("debug_print: %d\n", g_args.debug_print); + + for (int32_t i = 0; i < g_args.arg_list_len; i++) { + printf("arg_list[%d]: %s\n", i, g_args.arg_list[i]); + } } - } - printf("==============================\n"); + printf("==============================\n"); - if (g_args.cversion[0] != 0){ - tstrncpy(version, g_args.cversion, 11); - } + if (g_args.cversion[0] != 0){ + tstrncpy(version, g_args.cversion, 11); + } - if (taosCheckParam(&g_args) < 0) { - exit(EXIT_FAILURE); - } + if 
(taosCheckParam(&g_args) < 0) { + exit(EXIT_FAILURE); + } - g_fpOfResult = fopen(g_args.resultFile, "a"); - if (NULL == g_fpOfResult) { - fprintf(stderr, "Failed to open %s for save result\n", g_args.resultFile); - return 1; - }; - - fprintf(g_fpOfResult, "#############################################################################\n"); - fprintf(g_fpOfResult, "============================== arguments config =============================\n"); - { - fprintf(g_fpOfResult, "host: %s\n", g_args.host); - fprintf(g_fpOfResult, "user: %s\n", g_args.user); - fprintf(g_fpOfResult, "password: %s\n", g_args.password); - fprintf(g_fpOfResult, "port: %u\n", g_args.port); - fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion); - fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag); - fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath); - fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath); - fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile); - fprintf(g_fpOfResult, "encode: %s\n", g_args.encode); - fprintf(g_fpOfResult, "all_databases: %d\n", g_args.all_databases); - fprintf(g_fpOfResult, "databases: %d\n", g_args.databases); - fprintf(g_fpOfResult, "schemaonly: %d\n", g_args.schemaonly); - fprintf(g_fpOfResult, "with_property: %d\n", g_args.with_property); - fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); - fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); - fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); - fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); - fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch); - fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num); - fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys); - fprintf(g_fpOfResult, "abort: %d\n", g_args.abort); - fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn); - fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len); - - for (int32_t i = 0; i < g_args.arg_list_len; i++) { - fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]); + g_fpOfResult = fopen(g_args.resultFile, "a"); + if (NULL == g_fpOfResult) { + errorPrint("Failed to open %s for save result\n", g_args.resultFile); + exit(-1); + }; + + fprintf(g_fpOfResult, "#############################################################################\n"); + fprintf(g_fpOfResult, "============================== arguments config =============================\n"); + { + fprintf(g_fpOfResult, "host: %s\n", g_args.host); + fprintf(g_fpOfResult, "user: %s\n", g_args.user); + fprintf(g_fpOfResult, "password: %s\n", g_args.password); + fprintf(g_fpOfResult, "port: %u\n", g_args.port); + fprintf(g_fpOfResult, "cversion: %s\n", g_args.cversion); + fprintf(g_fpOfResult, "mysqlFlag: %d\n", g_args.mysqlFlag); + fprintf(g_fpOfResult, "outpath: %s\n", g_args.outpath); + fprintf(g_fpOfResult, "inpath: %s\n", g_args.inpath); + fprintf(g_fpOfResult, "resultFile: %s\n", g_args.resultFile); + fprintf(g_fpOfResult, "encode: %s\n", g_args.encode); + fprintf(g_fpOfResult, "all_databases: %s\n", g_args.all_databases?"true":"false"); + fprintf(g_fpOfResult, "databases: %d\n", g_args.databases); + fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false"); + fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); + fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); + fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); + fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); + 
fprintf(g_fpOfResult, "precision: %s\n", g_args.precision); + fprintf(g_fpOfResult, "data_batch: %d\n", g_args.data_batch); + fprintf(g_fpOfResult, "max_sql_len: %d\n", g_args.max_sql_len); + fprintf(g_fpOfResult, "table_batch: %d\n", g_args.table_batch); + fprintf(g_fpOfResult, "thread_num: %d\n", g_args.thread_num); + fprintf(g_fpOfResult, "allow_sys: %d\n", g_args.allow_sys); + fprintf(g_fpOfResult, "abort: %d\n", g_args.abort); + fprintf(g_fpOfResult, "isDumpIn: %d\n", g_args.isDumpIn); + fprintf(g_fpOfResult, "arg_list_len: %d\n", g_args.arg_list_len); + + for (int32_t i = 0; i < g_args.arg_list_len; i++) { + fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, g_args.arg_list[i]); + } } - } - g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN); - - time_t tTime = time(NULL); - struct tm tm = *localtime(&tTime); - - if (g_args.isDumpIn) { - fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n"); - fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - if (taosDumpIn(&g_args) < 0) { - fprintf(g_fpOfResult, "\n"); - fclose(g_fpOfResult); - return -1; - } - } else { - fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n"); - fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); - if (taosDumpOut(&g_args) < 0) { - fprintf(g_fpOfResult, "\n"); - fclose(g_fpOfResult); - return -1; - } - - fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n"); - fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut); - fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut); - fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut); - fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut); - } + g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN); + + time_t tTime = time(NULL); + struct tm tm = *localtime(&tTime); + + if (g_args.isDumpIn) { + fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n"); + fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", + tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + if (taosDumpIn() < 0) { + ret = -1; + } + } else { + fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n"); + fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", + tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + if (taosDumpOut() < 0) { + ret = -1; + } else { + fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n"); + fprintf(g_fpOfResult, "# total database count: %d\n", + g_resultStatistics.totalDatabasesOfDumpOut); + fprintf(g_fpOfResult, "# total super table count: %d\n", + g_resultStatistics.totalSuperTblsOfDumpOut); + fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", + g_resultStatistics.totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", + g_resultStatistics.totalRowsOfDumpOut); + } + } - fprintf(g_fpOfResult, "\n"); - fclose(g_fpOfResult); + fprintf(g_fpOfResult, "\n"); + 
fclose(g_fpOfResult); - return 0; + return ret; } -void taosFreeDbInfos() { - if (dbInfos == NULL) return; - for (int i = 0; i < 128; i++) tfree(dbInfos[i]); - tfree(dbInfos); +static void taosFreeDbInfos() { + if (g_dbInfos == NULL) return; + for (int i = 0; i < 128; i++) tfree(g_dbInfos[i]); + tfree(g_dbInfos); } // check table is normal table or super table -int taosGetTableRecordInfo(char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) { - TAOS_ROW row = NULL; - bool isSet = false; - TAOS_RES *result = NULL; +static int taosGetTableRecordInfo( + char *table, STableRecordInfo *pTableRecordInfo, TAOS *taosCon) { + TAOS_ROW row = NULL; + bool isSet = false; + TAOS_RES *result = NULL; + + memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); + + char* tempCommand = (char *)malloc(COMMAND_SIZE); + if (tempCommand == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", + __func__, __LINE__); + return -1; + } - memset(pTableRecordInfo, 0, sizeof(STableRecordInfo)); + sprintf(tempCommand, "show tables like %s", table); - char* tempCommand = (char *)malloc(COMMAND_SIZE); - if (tempCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } + result = taos_query(taosCon, tempCommand); + int32_t code = taos_errno(result); - sprintf(tempCommand, "show tables like %s", table); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command %s\n", + __func__, __LINE__, tempCommand); + free(tempCommand); + taos_free_result(result); + return -1; + } - result = taos_query(taosCon, tempCommand); - int32_t code = taos_errno(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = false; + tstrncpy(pTableRecordInfo->tableRecord.name, + (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1); + tstrncpy(pTableRecordInfo->tableRecord.metric, + (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1); + break; + } - if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tempCommand); - free(tempCommand); taos_free_result(result); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(result); + result = NULL; - while ((row = taos_fetch_row(result)) != NULL) { - isSet = true; - pTableRecordInfo->isMetric = false; - strncpy(pTableRecordInfo->tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); - strncpy(pTableRecordInfo->tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); - break; - } + if (isSet) { + free(tempCommand); + return 0; + } - taos_free_result(result); - result = NULL; + sprintf(tempCommand, "show stables like %s", table); - if (isSet) { - free(tempCommand); - return 0; - } + result = taos_query(taosCon, tempCommand); + code = taos_errno(result); - sprintf(tempCommand, "show stables like %s", table); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command %s\n", + __func__, __LINE__, tempCommand); + free(tempCommand); + taos_free_result(result); + return -1; + } - result = taos_query(taosCon, tempCommand); - code = taos_errno(result); + while ((row = taos_fetch_row(result)) != NULL) { + isSet = true; + pTableRecordInfo->isMetric = true; + tstrncpy(pTableRecordInfo->tableRecord.metric, table, + TSDB_TABLE_NAME_LEN); + break; + } - if (code != 0) { - fprintf(stderr, "failed to run command 
%s\n", tempCommand); - free(tempCommand); taos_free_result(result); - return -1; - } + result = NULL; - while ((row = taos_fetch_row(result)) != NULL) { - isSet = true; - pTableRecordInfo->isMetric = true; - tstrncpy(pTableRecordInfo->tableRecord.metric, table, TSDB_TABLE_NAME_LEN); - break; - } - - taos_free_result(result); - result = NULL; - - if (isSet) { + if (isSet) { + free(tempCommand); + return 0; + } + errorPrint("%s() LN%d, invalid table/metric %s\n", + __func__, __LINE__, table); free(tempCommand); - return 0; - } - fprintf(stderr, "invalid table/metric %s\n", table); - free(tempCommand); - return -1; + return -1; } -int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, char* metric, int* fd) { - STableRecord tableRecord; +static int32_t taosSaveAllNormalTableToTempFile(TAOS *taosCon, char*meter, + char* metric, int* fd) { + STableRecord tableRecord; - if (-1 == *fd) { - *fd = open(".tables.tmp.0", O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (*fd == -1) { - fprintf(stderr, "failed to open temp file: .tables.tmp.0\n"); - return -1; + if (-1 == *fd) { + *fd = open(".tables.tmp.0", + O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (*fd == -1) { + errorPrint("%s() LN%d, failed to open temp file: .tables.tmp.0\n", + __func__, __LINE__); + return -1; + } } - } - memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN); - tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, meter, TSDB_TABLE_NAME_LEN); + tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - taosWrite(*fd, &tableRecord, sizeof(STableRecord)); - return 0; + taosWrite(*fd, &tableRecord, sizeof(STableRecord)); + return 0; } +static int32_t taosSaveTableOfMetricToTempFile( + TAOS *taosCon, char* metric, + int32_t* totalNumOfThread) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; -int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct arguments *arguments, int32_t* totalNumOfThread) { - TAOS_ROW row; - int fd = -1; - STableRecord tableRecord; - - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } + char* tmpCommand = (char *)malloc(COMMAND_SIZE); + if (tmpCommand == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); + return -1; + } - sprintf(tmpCommand, "select tbname from %s", metric); + sprintf(tmpCommand, "select tbname from %s", metric); - TAOS_RES *res = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(res); - if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tmpCommand); + TAOS_RES *res = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command %s\n", + __func__, __LINE__, tmpCommand); + free(tmpCommand); + taos_free_result(res); + return -1; + } free(tmpCommand); - taos_free_result(res); - return -1; - } - free(tmpCommand); - - char tmpBuf[TSDB_FILENAME_LEN + 1]; - memset(tmpBuf, 0, TSDB_FILENAME_LEN); - sprintf(tmpBuf, ".select-tbname.tmp"); - fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); - taos_free_result(res); - return -1; - } - TAOS_FIELD *fields = taos_fetch_fields(res); + char tmpBuf[MAX_FILE_NAME_LEN]; + memset(tmpBuf, 0, MAX_FILE_NAME_LEN); + 
sprintf(tmpBuf, ".select-tbname.tmp"); + fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + errorPrint("%s() LN%d, failed to open temp file: %s\n", + __func__, __LINE__, tmpBuf); + taos_free_result(res); + return -1; + } - int32_t numOfTable = 0; - while ((row = taos_fetch_row(res)) != NULL) { + TAOS_FIELD *fields = taos_fetch_fields(res); - memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); - tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + int32_t numOfTable = 0; + while ((row = taos_fetch_row(res)) != NULL) { - taosWrite(fd, &tableRecord, sizeof(STableRecord)); - numOfTable++; - } - taos_free_result(res); - lseek(fd, 0, SEEK_SET); - - int maxThreads = arguments->thread_num; - int tableOfPerFile ; - if (numOfTable <= arguments->thread_num) { - tableOfPerFile = 1; - maxThreads = numOfTable; - } else { - tableOfPerFile = numOfTable / arguments->thread_num; - if (0 != numOfTable % arguments->thread_num) { - tableOfPerFile += 1; + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); + tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + numOfTable++; } - } + taos_free_result(res); + lseek(fd, 0, SEEK_SET); - char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); - if (NULL == tblBuf){ - fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); - close(fd); - return -1; - } + int maxThreads = g_args.thread_num; + int tableOfPerFile ; + if (numOfTable <= g_args.thread_num) { + tableOfPerFile = 1; + maxThreads = numOfTable; + } else { + tableOfPerFile = numOfTable / g_args.thread_num; + if (0 != numOfTable % g_args.thread_num) { + tableOfPerFile += 1; + } + } - int32_t numOfThread = *totalNumOfThread; - int subFd = -1; - for (; numOfThread < maxThreads; numOfThread++) { - memset(tmpBuf, 0, TSDB_FILENAME_LEN); - sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); - subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (subFd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); - for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); - (void)remove(tmpBuf); - } - sprintf(tmpBuf, ".select-tbname.tmp"); - (void)remove(tmpBuf); - free(tblBuf); - close(fd); - return -1; + char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); + if (NULL == tblBuf){ + errorPrint("%s() LN%d, failed to calloc %" PRIzu "\n", + __func__, __LINE__, tableOfPerFile * sizeof(STableRecord)); + close(fd); + return -1; } - // read tableOfPerFile for fd, write to subFd - ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); - if (readLen <= 0) { - close(subFd); - break; + int32_t numOfThread = *totalNumOfThread; + int subFd = -1; + for (; numOfThread < maxThreads; numOfThread++) { + memset(tmpBuf, 0, MAX_FILE_NAME_LEN); + sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); + subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (subFd == -1) { + errorPrint("%s() LN%d, failed to open temp file: %s\n", + __func__, __LINE__, tmpBuf); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + sprintf(tmpBuf, ".select-tbname.tmp"); + (void)remove(tmpBuf); + free(tblBuf); + close(fd); + 
return -1; + } + + // read tableOfPerFile for fd, write to subFd + ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); + if (readLen <= 0) { + close(subFd); + break; + } + taosWrite(subFd, tblBuf, readLen); + close(subFd); } - taosWrite(subFd, tblBuf, readLen); - close(subFd); - } - sprintf(tmpBuf, ".select-tbname.tmp"); - (void)remove(tmpBuf); + sprintf(tmpBuf, ".select-tbname.tmp"); + (void)remove(tmpBuf); - if (fd >= 0) { - close(fd); - fd = -1; - } + if (fd >= 0) { + close(fd); + fd = -1; + } - *totalNumOfThread = numOfThread; + *totalNumOfThread = numOfThread; - free(tblBuf); - return 0; + free(tblBuf); + return 0; } -int taosDumpOut(struct arguments *arguments) { - TAOS *taos = NULL; - TAOS_RES *result = NULL; - char *command = NULL; - - TAOS_ROW row; - FILE *fp = NULL; - int32_t count = 0; - STableRecordInfo tableRecordInfo; - - char tmpBuf[TSDB_FILENAME_LEN+9] = {0}; - if (arguments->outpath[0] != 0) { - sprintf(tmpBuf, "%s/dbs.sql", arguments->outpath); - } else { - sprintf(tmpBuf, "dbs.sql"); - } - - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - fprintf(stderr, "failed to open file %s\n", tmpBuf); - return -1; - } +static int taosDumpOut() { + TAOS *taos = NULL; + TAOS_RES *result = NULL; + char *command = NULL; - dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); - if (dbInfos == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - goto _exit_failure; - } + TAOS_ROW row; + FILE *fp = NULL; + int32_t count = 0; + STableRecordInfo tableRecordInfo; - command = (char *)malloc(COMMAND_SIZE); - if (command == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - goto _exit_failure; - } + char tmpBuf[4096] = {0}; + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); + } else { + sprintf(tmpBuf, "dbs.sql"); + } - /* Connect to server */ - taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); - if (taos == NULL) { - fprintf(stderr, "failed to connect to TDengine server\n"); - goto _exit_failure; - } + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return -1; + } - /* --------------------------------- Main Code -------------------------------- */ - /* if (arguments->databases || arguments->all_databases) { // dump part of databases or all databases */ - /* */ - taosDumpCharset(fp); + g_dbInfos = (SDbInfo **)calloc(128, sizeof(SDbInfo *)); + if (g_dbInfos == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", + __func__, __LINE__); + goto _exit_failure; + } - sprintf(command, "show databases"); - result = taos_query(taos, command); - int32_t code = taos_errno(result); + command = (char *)malloc(COMMAND_SIZE); + if (command == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", __func__, __LINE__); + goto _exit_failure; + } - if (code != 0) { - fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result)); - goto _exit_failure; - } + /* Connect to server */ + taos = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + goto _exit_failure; + } - TAOS_FIELD *fields = taos_fetch_fields(result); + /* --------------------------------- Main Code -------------------------------- */ + /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */ + /* */ + taosDumpCharset(fp); - while ((row = 
taos_fetch_row(result)) != NULL) { - // sys database name : 'log', but subsequent version changed to 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0 && - (!arguments->allow_sys)) - continue; + sprintf(command, "show databases"); + result = taos_query(taos, command); + int32_t code = taos_errno(result); - if (arguments->databases) { // input multi dbs - for (int i = 0; arguments->arg_list[i]; i++) { - if (strncasecmp(arguments->arg_list[i], (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) - goto _dump_db_point; - } - continue; - } else if (!arguments->all_databases) { // only input one db - if (strncasecmp(arguments->arg_list[0], (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) - goto _dump_db_point; - else - continue; - } - - _dump_db_point: - - dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (dbInfos[count] == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - goto _exit_failure; - } - - strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - if (arguments->with_property) { - dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); - //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); - //dbInfos[count]->daysToKeep1; - //dbInfos[count]->daysToKeep2; - dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); - //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); - dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - } - count++; - - if (arguments->databases) { - if (count > arguments->arg_list_len) break; - - } else if (!arguments->all_databases) { - if (count >= 1) break; + if (code != 0) { + errorPrint("%s() LN%d, failed to run command: %s, reason: %s\n", + __func__, __LINE__, command, taos_errstr(result)); + goto _exit_failure; } - } - if (count == 0) { - fprintf(stderr, "No databases valid to dump\n"); - goto _exit_failure; - } + TAOS_FIELD *fields = taos_fetch_fields(result); - if (arguments->databases || arguments->all_databases) { // case: taosdump --databases dbx dby ... 
OR taosdump --all-databases - for (int i = 0; i < count; i++) { - taosDumpDb(dbInfos[i], arguments, fp, taos); - } - } else { - if (arguments->arg_list_len == 1) { // case: taosdump - taosDumpDb(dbInfos[0], arguments, fp, taos); - } else { // case: taosdump tablex tabley ... - taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); - fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfos[0]->name); - g_resultStatistics.totalDatabasesOfDumpOut++; + while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'log', but subsequent version changed to 'log' + if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + && (!g_args.allow_sys)) { + continue; + } - sprintf(command, "use %s", dbInfos[0]->name); + if (g_args.databases) { // input multi dbs + for (int i = 0; g_args.arg_list[i]; i++) { + if (strncasecmp(g_args.arg_list[i], + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + } + continue; + } else if (!g_args.all_databases) { // only input one db + if (strncasecmp(g_args.arg_list[0], + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + goto _dump_db_point; + else + continue; + } - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - fprintf(stderr, "invalid database %s\n", dbInfos[0]->name); - goto _exit_failure; - } +_dump_db_point: - fprintf(fp, "USE %s;\n\n", dbInfos[0]->name); + g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (g_dbInfos[count] == NULL) { + errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", + __func__, __LINE__, (uint64_t)sizeof(SDbInfo)); + goto _exit_failure; + } - int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 - int normalTblFd = -1; - int32_t retCode; - int superTblCnt = 0 ; - for (int i = 1; arguments->arg_list[i]; i++) { - if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { - fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]); - continue; + tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + min(TSDB_DB_NAME_LEN, fields[TSDB_SHOW_DB_NAME_INDEX].bytes) + 1); + if (g_args.with_property) { + g_dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + g_dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + g_dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + g_dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + g_dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + tstrncpy(g_dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes) + 1); + //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + //g_dbInfos[count]->daysToKeep1; + //g_dbInfos[count]->daysToKeep2; + g_dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + g_dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + g_dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + g_dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + g_dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + g_dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + g_dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + g_dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); + 
+ tstrncpy(g_dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + min(8, fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes) + 1); + //g_dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + g_dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } + count++; - if (tableRecordInfo.isMetric) { // dump all table of this metric - int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); - if (0 == ret) { - superTblCnt++; - } - retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); - } else { - if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric - int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); - if (0 == ret) { - superTblCnt++; - } - } - retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); + if (g_args.databases) { + if (count > g_args.arg_list_len) break; + + } else if (!g_args.all_databases) { + if (count >= 1) break; } + } + + if (count == 0) { + errorPrint("%d databases valid to dump\n", count); + goto _exit_failure; + } - if (retCode < 0) { - if (-1 != normalTblFd){ - taosClose(normalTblFd); - } - goto _clean_tmp_file; + if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx dby ... OR taosdump --all-databases + for (int i = 0; i < count; i++) { + taosDumpDb(g_dbInfos[i], fp, taos); } - } + } else { + if (g_args.arg_list_len == 1) { // case: taosdump + taosDumpDb(g_dbInfos[0], fp, taos); + } else { // case: taosdump tablex tabley ... + taosDumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp); + fprintf(g_fpOfResult, "\n#### database: %s\n", + g_dbInfos[0]->name); + g_resultStatistics.totalDatabasesOfDumpOut++; + + sprintf(command, "use %s", g_dbInfos[0]->name); + + result = taos_query(taos, command); + code = taos_errno(result); + if (code != 0) { + errorPrint("invalid database %s\n", g_dbInfos[0]->name); + goto _exit_failure; + } - // TODO: save dump super table into result_output.txt - fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); - g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + fprintf(fp, "USE %s;\n\n", g_dbInfos[0]->name); + + int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 + int normalTblFd = -1; + int32_t retCode; + int superTblCnt = 0 ; + for (int i = 1; g_args.arg_list[i]; i++) { + if (taosGetTableRecordInfo(g_args.arg_list[i], + &tableRecordInfo, taos) < 0) { + errorPrint("input the invalide table %s\n", + g_args.arg_list[i]); + continue; + } + + if (tableRecordInfo.isMetric) { // dump all table of this metric + int ret = taosDumpStable( + tableRecordInfo.tableRecord.metric, + fp, taos, g_dbInfos[0]->name); + if (0 == ret) { + superTblCnt++; + } + retCode = taosSaveTableOfMetricToTempFile( + taos, tableRecordInfo.tableRecord.metric, + &totalNumOfThread); + } else { + if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric + int ret = taosDumpStable( + tableRecordInfo.tableRecord.metric, + fp, taos, g_dbInfos[0]->name); + if (0 == ret) { + superTblCnt++; + } + } + retCode = taosSaveAllNormalTableToTempFile( + taos, tableRecordInfo.tableRecord.name, + tableRecordInfo.tableRecord.metric, &normalTblFd); + } + + if (retCode < 0) { + if (-1 != normalTblFd){ + taosClose(normalTblFd); + } + goto _clean_tmp_file; + } + } - if (-1 != normalTblFd){ - 
taosClose(normalTblFd); - } + // TODO: save dump super table into result_output.txt + fprintf(g_fpOfResult, "# super table counter: %d\n", + superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + + if (-1 != normalTblFd){ + taosClose(normalTblFd); + } - // start multi threads to dumpout - taosStartDumpOutWorkThreads(taos, arguments, totalNumOfThread, dbInfos[0]->name); + // start multi threads to dumpout + taosStartDumpOutWorkThreads(totalNumOfThread, + g_dbInfos[0]->name); - char tmpFileName[TSDB_FILENAME_LEN + 1]; - _clean_tmp_file: - for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - remove(tmpFileName); - } + char tmpFileName[MAX_FILE_NAME_LEN]; +_clean_tmp_file: + for (int loopCnt = 0; loopCnt < totalNumOfThread; loopCnt++) { + sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); + remove(tmpFileName); + } + } } - } - /* Close the handle and return */ - fclose(fp); - taos_close(taos); - taos_free_result(result); - tfree(command); - taosFreeDbInfos(); - fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); - return 0; + /* Close the handle and return */ + fclose(fp); + taos_close(taos); + taos_free_result(result); + tfree(command); + taosFreeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows); + return 0; _exit_failure: - fclose(fp); - taos_close(taos); - taos_free_result(result); - tfree(command); - taosFreeDbInfos(); - fprintf(stderr, "dump out rows: %" PRId64 "\n", totalDumpOutRows); - return -1; + fclose(fp); + taos_close(taos); + taos_free_result(result); + tfree(command); + taosFreeDbInfos(); + errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows); + return -1; } -int taosGetTableDes( +static int taosGetTableDes( char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { - TAOS_ROW row = NULL; - TAOS_RES* res = NULL; - int count = 0; - - char sqlstr[COMMAND_SIZE]; - sprintf(sqlstr, "describe %s.%s;", dbName, table); - - res = taos_query(taosCon, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); - taos_free_result(res); - return -1; - } - - TAOS_FIELD *fields = taos_fetch_fields(res); + TAOS_ROW row = NULL; + TAOS_RES* res = NULL; + int count = 0; - tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); + char sqlstr[COMMAND_SIZE]; + sprintf(sqlstr, "describe %s.%s;", dbName, table); - while ((row = taos_fetch_row(res)) != NULL) { - strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); - strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes); - tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); - - count++; - } + res = taos_query(taosCon, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - taos_free_result(res); - res = NULL; + TAOS_FIELD *fields = taos_fetch_fields(res); + + tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); + while ((row = taos_fetch_row(res)) != NULL) { + tstrncpy(tableDes->cols[count].field, + (char 
*)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + min(TSDB_COL_NAME_LEN + 1, + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); + tstrncpy(tableDes->cols[count].type, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes) + 1); + tableDes->cols[count].length = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(tableDes->cols[count].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(COL_NOTE_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); + + count++; + } - if (isSuperTable) { - return count; - } + taos_free_result(res); + res = NULL; - // if chidl-table have tag, using select tagName from table to get tagValue - for (int i = 0 ; i < count; i++) { - if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; + if (isSuperTable) { + return count; + } + // if chidl-table have tag, using select tagName from table to get tagValue + for (int i = 0 ; i < count; i++) { + if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; - sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table); - res = taos_query(taosCon, sqlstr); - code = taos_errno(res); - if (code != 0) { - fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); - taos_free_result(res); - return -1; - } + sprintf(sqlstr, "select %s from %s.%s", + tableDes->cols[i].field, dbName, table); - fields = taos_fetch_fields(res); + res = taos_query(taosCon, sqlstr); + code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - row = taos_fetch_row(res); - if (NULL == row) { - fprintf(stderr, " fetch failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); - taos_free_result(res); - return -1; - } + fields = taos_fetch_fields(res); - if (row[0] == NULL) { - sprintf(tableDes->cols[i].note, "%s", "NULL"); - taos_free_result(res); - res = NULL; - continue; - } + row = taos_fetch_row(res); + if (NULL == row) { + errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - int32_t* length = taos_fetch_lengths(res); + if (row[0] == NULL) { + sprintf(tableDes->cols[i].note, "%s", "NULL"); + taos_free_result(res); + res = NULL; + continue; + } - //int32_t* length = taos_fetch_lengths(tmpResult); - switch (fields[0].type) { - case TSDB_DATA_TYPE_BOOL: - sprintf(tableDes->cols[i].note, "%d", ((((int32_t)(*((char *)row[0]))) == 1) ? 
1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0])); - break; - case TSDB_DATA_TYPE_SMALLINT: - sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0])); - break; - case TSDB_DATA_TYPE_INT: - sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0])); - break; - case TSDB_DATA_TYPE_BIGINT: - sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0])); - break; - case TSDB_DATA_TYPE_FLOAT: - sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0])); - break; - case TSDB_DATA_TYPE_DOUBLE: - sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0])); - break; - case TSDB_DATA_TYPE_BINARY: { - memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); - tableDes->cols[i].note[0] = '\''; - char tbuf[COL_NOTE_LEN]; - converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); - char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); - *(pstr++) = '\''; - break; - } - case TSDB_DATA_TYPE_NCHAR: { - memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); - char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' ' - convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); - sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); - break; - } - case TSDB_DATA_TYPE_TIMESTAMP: - sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); - #if 0 - if (!arguments->mysqlFlag) { - sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); - } else { - char buf[64] = "\0"; - int64_t ts = *((int64_t *)row[0]); - time_t tt = (time_t)(ts / 1000); - struct tm *ptm = localtime(&tt); - strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000)); + int32_t* length = taos_fetch_lengths(res); + + //int32_t* length = taos_fetch_lengths(tmpResult); + switch (fields[0].type) { + case TSDB_DATA_TYPE_BOOL: + sprintf(tableDes->cols[i].note, "%d", + ((((int32_t)(*((char *)row[0]))) == 1) ? 
1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + sprintf(tableDes->cols[i].note, "%d", *((int8_t *)row[0])); + break; + case TSDB_DATA_TYPE_SMALLINT: + sprintf(tableDes->cols[i].note, "%d", *((int16_t *)row[0])); + break; + case TSDB_DATA_TYPE_INT: + sprintf(tableDes->cols[i].note, "%d", *((int32_t *)row[0])); + break; + case TSDB_DATA_TYPE_BIGINT: + sprintf(tableDes->cols[i].note, "%" PRId64 "", *((int64_t *)row[0])); + break; + case TSDB_DATA_TYPE_FLOAT: + sprintf(tableDes->cols[i].note, "%f", GET_FLOAT_VAL(row[0])); + break; + case TSDB_DATA_TYPE_DOUBLE: + sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0])); + break; + case TSDB_DATA_TYPE_BINARY: + { + memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + tableDes->cols[i].note[0] = '\''; + char tbuf[COL_NOTE_LEN]; + converStringToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); + char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); + *(pstr++) = '\''; + break; + } + case TSDB_DATA_TYPE_NCHAR: + { + memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' ' + convertNCharToReadable((char *)row[0], length[0], tbuf, COL_NOTE_LEN); + sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: + sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); +#if 0 + if (!g_args.mysqlFlag) { + sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[0]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + sprintf(tableDes->cols[i].note, "\'%s.%03d\'", buf, (int)(ts % 1000)); + } +#endif + break; + default: + break; } - #endif - break; - default: - break; + + taos_free_result(res); + res = NULL; } - taos_free_result(res); - res = NULL; - } + return count; +} - return count; +static int convertSchemaToAvroSchema(STableDef *tableDes, char **avroSchema) +{ + errorPrint("%s() LN%d TODO: covert table schema to avro schema\n", + __func__, __LINE__); + return 0; } -int32_t taosDumpTable( - char *table, char *metric, struct arguments *arguments, +static int32_t taosDumpTable( + char *table, char *metric, FILE *fp, TAOS* taosCon, char* dbName) { - int count = 0; + int count = 0; - STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + + sizeof(SColDes) * TSDB_MAX_COLUMNS); - if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table - /* - count = taosGetTableDes(metric, tableDes, taosCon); + if (metric != NULL && metric[0] != '\0') { // dump table schema which is created by using super table + /* + count = taosGetTableDes(metric, tableDes, taosCon); - if (count < 0) { - free(tableDes); - return -1; - } + if (count < 0) { + free(tableDes); + return -1; + } - taosDumpCreateTableClause(tableDes, count, fp); + taosDumpCreateTableClause(tableDes, count, fp); - memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); - */ + memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + */ - count = taosGetTableDes(dbName, table, tableDes, taosCon, false); + count = taosGetTableDes(dbName, table, tableDes, taosCon, false); - if (count < 0) { - free(tableDes); - return -1; - } + if (count < 0) { + free(tableDes); + return -1; + } - // create child-table using super-table - 
taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName); + // create child-table using super-table + taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName); - } else { // dump table definition - count = taosGetTableDes(dbName, table, tableDes, taosCon, false); + } else { // dump table definition + count = taosGetTableDes(dbName, table, tableDes, taosCon, false); - if (count < 0) { - free(tableDes); - return -1; + if (count < 0) { + free(tableDes); + return -1; + } + + // create normal-table or super-table + taosDumpCreateTableClause(tableDes, count, fp, dbName); } - // create normal-table or super-table - taosDumpCreateTableClause(tableDes, count, fp, dbName); - } + char *jsonAvroSchema = NULL; + if (g_args.avro) { + convertSchemaToAvroSchema(tableDes, &jsonAvroSchema); + } - free(tableDes); + free(tableDes); + + int32_t ret = 0; + if (!g_args.schemaonly) { + ret = taosDumpTableData(fp, table, taosCon, dbName, + jsonAvroSchema); + } - return taosDumpTableData(fp, table, arguments, taosCon, dbName); + return ret; } -void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { - char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - - char *pstr = sqlstr; - pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); - if (isDumpProperty) { - pstr += sprintf(pstr, - "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d", - dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache, - dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->fsync, dbInfo->cachelast, - dbInfo->comp, dbInfo->precision, dbInfo->update); - } +static void taosDumpCreateDbClause( + SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; + + char *pstr = sqlstr; + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); + if (isDumpProperty) { + pstr += sprintf(pstr, + "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d", + dbInfo->replica, dbInfo->quorum, dbInfo->days, + dbInfo->keeplist, + dbInfo->cache, + dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, + dbInfo->fsync, + dbInfo->cachelast, + dbInfo->comp, dbInfo->precision, dbInfo->update); + } - pstr += sprintf(pstr, ";"); - fprintf(fp, "%s\n\n", sqlstr); + pstr += sprintf(pstr, ";"); + fprintf(fp, "%s\n\n", sqlstr); } -void* taosDumpOutWorkThreadFp(void *arg) +static void* taosDumpOutWorkThreadFp(void *arg) { - SThreadParaObj *pThread = (SThreadParaObj*)arg; - STableRecord tableRecord; - int fd; - - char tmpBuf[TSDB_FILENAME_LEN*4] = {0}; - sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex); - fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpBuf); - return NULL; - } - - FILE *fp = NULL; - memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); - - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.tables.%d.sql", g_args.outpath, pThread->dbName, pThread->threadIndex); - } else { - sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); - } + SThreadParaObj *pThread = (SThreadParaObj*)arg; + STableRecord tableRecord; + int fd; + + setThreadName("dumpOutWorkThrd"); + + char tmpBuf[4096] = {0}; + sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + errorPrint("%s() LN%d, 
failed to open temp file: %s\n", + __func__, __LINE__, tmpBuf); + return NULL; + } - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - fprintf(stderr, "failed to open file %s\n", tmpBuf); - close(fd); - return NULL; - } + FILE *fp = NULL; + memset(tmpBuf, 0, 4096); - memset(tmpBuf, 0, TSDB_FILENAME_LEN); - sprintf(tmpBuf, "use %s", pThread->dbName); + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.tables.%d.sql", + g_args.outpath, pThread->dbName, pThread->threadIndex); + } else { + sprintf(tmpBuf, "%s.tables.%d.sql", + pThread->dbName, pThread->threadIndex); + } - TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf); - int32_t code = taos_errno(tmpResult); - if (code != 0) { - fprintf(stderr, "invalid database %s\n", pThread->dbName); - taos_free_result(tmpResult); - fclose(fp); - close(fd); - return NULL; - } + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + close(fd); + return NULL; + } - int fileNameIndex = 1; - int tablesInOneFile = 0; - int64_t lastRowsPrint = 5000000; - fprintf(fp, "USE %s;\n\n", pThread->dbName); - while (1) { - ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); - if (readLen <= 0) break; - - int ret = taosDumpTable( - tableRecord.name, tableRecord.metric, &g_args, - fp, pThread->taosCon, pThread->dbName); - if (ret >= 0) { - // TODO: sum table count and table rows by self - pThread->tablesOfDumpOut++; - pThread->rowsOfDumpOut += ret; - - if (pThread->rowsOfDumpOut >= lastRowsPrint) { - printf(" %"PRId64 " rows already be dumpout from database %s\n", - pThread->rowsOfDumpOut, pThread->dbName); - lastRowsPrint += 5000000; - } + memset(tmpBuf, 0, 4096); + sprintf(tmpBuf, "use %s", pThread->dbName); - tablesInOneFile++; - if (tablesInOneFile >= g_args.table_batch) { + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf); + int32_t code = taos_errno(tmpResult); + if (code != 0) { + errorPrint("%s() LN%d, invalid database %s. 
reason: %s\n", + __func__, __LINE__, pThread->dbName, taos_errstr(tmpResult)); + taos_free_result(tmpResult); fclose(fp); - tablesInOneFile = 0; + close(fd); + return NULL; + } - memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", - g_args.outpath, pThread->dbName, - pThread->threadIndex, fileNameIndex); - } else { - sprintf(tmpBuf, "%s.tables.%d-%d.sql", - pThread->dbName, pThread->threadIndex, fileNameIndex); - } - fileNameIndex++; - - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - fprintf(stderr, "failed to open file %s\n", tmpBuf); - close(fd); - taos_free_result(tmpResult); - return NULL; +#if 0 + int fileNameIndex = 1; + int tablesInOneFile = 0; +#endif + int64_t lastRowsPrint = 5000000; + fprintf(fp, "USE %s;\n\n", pThread->dbName); + while (1) { + ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); + if (readLen <= 0) break; + + int ret = taosDumpTable( + tableRecord.name, tableRecord.metric, + fp, pThread->taosCon, pThread->dbName); + if (ret >= 0) { + // TODO: sum table count and table rows by self + pThread->tablesOfDumpOut++; + pThread->rowsOfDumpOut += ret; + + if (pThread->rowsOfDumpOut >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from database %s\n", + pThread->rowsOfDumpOut, pThread->dbName); + lastRowsPrint += 5000000; + } + +#if 0 + tablesInOneFile++; + if (tablesInOneFile >= g_args.table_batch) { + fclose(fp); + tablesInOneFile = 0; + + memset(tmpBuf, 0, 4096); + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", + g_args.outpath, pThread->dbName, + pThread->threadIndex, fileNameIndex); + } else { + sprintf(tmpBuf, "%s.tables.%d-%d.sql", + pThread->dbName, pThread->threadIndex, fileNameIndex); + } + fileNameIndex++; + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + close(fd); + taos_free_result(tmpResult); + return NULL; + } + } +#endif } - } } - } - taos_free_result(tmpResult); - close(fd); - fclose(fp); + taos_free_result(tmpResult); + close(fd); + fclose(fp); - return NULL; + return NULL; } -static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName) +static void taosStartDumpOutWorkThreads(int32_t numOfThread, char *dbName) { - pthread_attr_t thattr; - SThreadParaObj *threadObj = - (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); - for (int t = 0; t < numOfThread; ++t) { - SThreadParaObj *pThread = threadObj + t; - pThread->rowsOfDumpOut = 0; - pThread->tablesOfDumpOut = 0; - pThread->threadIndex = t; - pThread->totalThreads = numOfThread; - tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); - pThread->taosCon = taosCon; - - pthread_attr_init(&thattr); - pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - - if (pthread_create(&(pThread->threadID), &thattr, taosDumpOutWorkThreadFp, (void*)pThread) != 0) { - fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); - exit(0); + pthread_attr_t thattr; + SThreadParaObj *threadObj = + (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); + + if (threadObj == NULL) { + errorPrint("%s() LN%d, memory allocation failed!\n", + __func__, __LINE__); + return; } - } - for (int32_t t = 0; t < numOfThread; ++t) { - pthread_join(threadObj[t].threadID, NULL); - } + for (int t = 0; t < numOfThread; ++t) { + SThreadParaObj *pThread = threadObj + t; + pThread->rowsOfDumpOut = 0; + pThread->tablesOfDumpOut = 0; + 
pThread->threadIndex = t; + pThread->totalThreads = numOfThread; + tstrncpy(pThread->dbName, dbName, TSDB_DB_NAME_LEN); + pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (pThread->taosCon == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + free(threadObj); + return; + } + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, + taosDumpOutWorkThreadFp, + (void*)pThread) != 0) { + errorPrint("%s() LN%d, thread:%d failed to start\n", + __func__, __LINE__, pThread->threadIndex); + exit(-1); + } + } - // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt - int64_t totalRowsOfDumpOut = 0; - int64_t totalChildTblsOfDumpOut = 0; - for (int32_t t = 0; t < numOfThread; ++t) { - totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut; - totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut; - } + for (int32_t t = 0; t < numOfThread; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } + + // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt + int64_t totalRowsOfDumpOut = 0; + int64_t totalChildTblsOfDumpOut = 0; + for (int32_t t = 0; t < numOfThread; ++t) { + totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut; + totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut; + } - fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n", totalChildTblsOfDumpOut); - fprintf(g_fpOfResult, "# row counter: %"PRId64"\n", totalRowsOfDumpOut); - g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut; - g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut; - free(threadObj); + fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n", + totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# row counter: %"PRId64"\n", + totalRowsOfDumpOut); + g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut; + g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut; + free(threadObj); } +static int32_t taosDumpStable(char *table, FILE *fp, + TAOS* taosCon, char* dbName) { + uint64_t sizeOfTableDes = (uint64_t)(sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); + STableDef *tableDes = (STableDef *)calloc(1, sizeOfTableDes); + if (NULL == tableDes) { + errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", + __func__, __LINE__, sizeOfTableDes); + exit(-1); + } -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) { - int count = 0; + int count = taosGetTableDes(dbName, table, tableDes, taosCon, true); - STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); - if (NULL == tableDes) { - fprintf(stderr, "failed to allocate memory\n"); - exit(-1); - } + if (count < 0) { + free(tableDes); + errorPrint("%s() LN%d, failed to get stable[%s] schema\n", + __func__, __LINE__, table); + exit(-1); + } - count = taosGetTableDes(dbName, table, tableDes, taosCon, true); + taosDumpCreateTableClause(tableDes, count, fp, dbName); - if (count < 0) { free(tableDes); - fprintf(stderr, "failed to get stable[%s] schema\n", table); - exit(-1); - } - - taosDumpCreateTableClause(tableDes, count, fp, dbName); - - free(tableDes); - return 0; + return 0; } - -int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) +static int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) { - TAOS_ROW row; - int fd = -1; - STableRecord 
tableRecord; - char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - - sprintf(sqlstr, "show %s.stables", dbName); + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - TAOS_RES* res = taos_query(taosCon, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res)); - taos_free_result(res); - exit(-1); - } + sprintf(sqlstr, "show %s.stables", dbName); - TAOS_FIELD *fields = taos_fetch_fields(res); + TAOS_RES* res = taos_query(taosCon, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + exit(-1); + } - char tmpFileName[TSDB_FILENAME_LEN + 1]; - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, ".stables.tmp"); - fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(res); - (void)remove(".stables.tmp"); - exit(-1); - } + TAOS_FIELD *fields = taos_fetch_fields(res); + + char tmpFileName[MAX_FILE_NAME_LEN]; + memset(tmpFileName, 0, MAX_FILE_NAME_LEN); + sprintf(tmpFileName, ".stables.tmp"); + fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + errorPrint("%s() LN%d, failed to open temp file: %s\n", + __func__, __LINE__, tmpFileName); + taos_free_result(res); + (void)remove(".stables.tmp"); + exit(-1); + } - while ((row = taos_fetch_row(res)) != NULL) { - memset(&tableRecord, 0, sizeof(STableRecord)); - strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); - taosWrite(fd, &tableRecord, sizeof(STableRecord)); - } + while ((row = taos_fetch_row(res)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1); + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + } - taos_free_result(res); - (void)lseek(fd, 0, SEEK_SET); + taos_free_result(res); + (void)lseek(fd, 0, SEEK_SET); - int superTblCnt = 0; - while (1) { - ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); - if (readLen <= 0) break; + int superTblCnt = 0; + while (1) { + ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); + if (readLen <= 0) break; - int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName); - if (0 == ret) { - superTblCnt++; + int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName); + if (0 == ret) { + superTblCnt++; + } } - } - // TODO: save dump super table into result_output.txt - fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); - g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + // TODO: save dump super table into result_output.txt + fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; - close(fd); - (void)remove(".stables.tmp"); + close(fd); + (void)remove(".stables.tmp"); - return 0; + return 0; } -int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon) { - TAOS_ROW row; - int fd = -1; - STableRecord tableRecord; - - taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); - - fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name); - 
g_resultStatistics.totalDatabasesOfDumpOut++; +static int taosDumpDb(SDbInfo *dbInfo, FILE *fp, TAOS *taosCon) { + TAOS_ROW row; + int fd = -1; + STableRecord tableRecord; - char sqlstr[TSDB_MAX_SQL_LEN] = {0}; + taosDumpCreateDbClause(dbInfo, g_args.with_property, fp); - fprintf(fp, "USE %s;\n\n", dbInfo->name); + fprintf(g_fpOfResult, "\n#### database: %s\n", + dbInfo->name); + g_resultStatistics.totalDatabasesOfDumpOut++; - (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); - - sprintf(sqlstr, "show %s.tables", dbInfo->name); - - TAOS_RES* res = taos_query(taosCon, sqlstr); - int code = taos_errno(res); - if (code != 0) { - fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); - taos_free_result(res); - return -1; - } + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - char tmpBuf[TSDB_FILENAME_LEN + 1]; - memset(tmpBuf, 0, TSDB_FILENAME_LEN); - sprintf(tmpBuf, ".show-tables.tmp"); - fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); - taos_free_result(res); - return -1; - } + fprintf(fp, "USE %s;\n\n", dbInfo->name); - TAOS_FIELD *fields = taos_fetch_fields(res); + (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); - int32_t numOfTable = 0; - while ((row = taos_fetch_row(res)) != NULL) { - memset(&tableRecord, 0, sizeof(STableRecord)); - tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); - tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], - fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); + sprintf(sqlstr, "show %s.tables", dbInfo->name); - taosWrite(fd, &tableRecord, sizeof(STableRecord)); + TAOS_RES* res = taos_query(taosCon, sqlstr); + int code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason:%s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - numOfTable++; - } - taos_free_result(res); - lseek(fd, 0, SEEK_SET); - - int maxThreads = g_args.thread_num; - int tableOfPerFile ; - if (numOfTable <= g_args.thread_num) { - tableOfPerFile = 1; - maxThreads = numOfTable; - } else { - tableOfPerFile = numOfTable / g_args.thread_num; - if (0 != numOfTable % g_args.thread_num) { - tableOfPerFile += 1; + char tmpBuf[MAX_FILE_NAME_LEN]; + memset(tmpBuf, 0, MAX_FILE_NAME_LEN); + sprintf(tmpBuf, ".show-tables.tmp"); + fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + errorPrint("%s() LN%d, failed to open temp file: %s\n", + __func__, __LINE__, tmpBuf); + taos_free_result(res); + return -1; } - } - char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); - if (NULL == tblBuf){ - fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); - close(fd); - return -1; - } + TAOS_FIELD *fields = taos_fetch_fields(res); - int32_t numOfThread = 0; - int subFd = -1; - for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) { - memset(tmpBuf, 0, TSDB_FILENAME_LEN); - sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); - subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (subFd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); - for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); - (void)remove(tmpBuf); - } - sprintf(tmpBuf, ".show-tables.tmp"); - 
(void)remove(tmpBuf); - free(tblBuf); - close(fd); - return -1; + int32_t numOfTable = 0; + while ((row = taos_fetch_row(res)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); + tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes) + 1); + tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], + min(TSDB_TABLE_NAME_LEN, + fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes) + 1); + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); + + numOfTable++; } + taos_free_result(res); + lseek(fd, 0, SEEK_SET); - // read tableOfPerFile for fd, write to subFd - ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); - if (readLen <= 0) { - close(subFd); - break; + int maxThreads = g_args.thread_num; + int tableOfPerFile ; + if (numOfTable <= g_args.thread_num) { + tableOfPerFile = 1; + maxThreads = numOfTable; + } else { + tableOfPerFile = numOfTable / g_args.thread_num; + if (0 != numOfTable % g_args.thread_num) { + tableOfPerFile += 1; + } } - taosWrite(subFd, tblBuf, readLen); - close(subFd); - } - sprintf(tmpBuf, ".show-tables.tmp"); - (void)remove(tmpBuf); + char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); + if (NULL == tblBuf){ + errorPrint("failed to calloc %" PRIzu "\n", + tableOfPerFile * sizeof(STableRecord)); + close(fd); + return -1; + } - if (fd >= 0) { - close(fd); - fd = -1; - } + int32_t numOfThread = 0; + int subFd = -1; + for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) { + memset(tmpBuf, 0, MAX_FILE_NAME_LEN); + sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); + subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (subFd == -1) { + errorPrint("%s() LN%d, failed to open temp file: %s\n", + __func__, __LINE__, tmpBuf); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + sprintf(tmpBuf, ".show-tables.tmp"); + (void)remove(tmpBuf); + free(tblBuf); + close(fd); + return -1; + } - taos_free_result(res); + // read tableOfPerFile for fd, write to subFd + ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); + if (readLen <= 0) { + close(subFd); + break; + } + taosWrite(subFd, tblBuf, readLen); + close(subFd); + } - // start multi threads to dumpout - taosStartDumpOutWorkThreads(taosCon, arguments, numOfThread, dbInfo->name); - for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + sprintf(tmpBuf, ".show-tables.tmp"); (void)remove(tmpBuf); - } - free(tblBuf); - return 0; + if (fd >= 0) { + close(fd); + fd = -1; + } + + // start multi threads to dumpout + taosStartDumpOutWorkThreads(numOfThread, dbInfo->name); + for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + + free(tblBuf); + return 0; } -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName) { +static void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, + FILE *fp, char* dbName) { int counter = 0; int count_temp = 0; char sqlstr[COMMAND_SIZE]; @@ -1702,254 +1913,291 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, cha fprintf(fp, "%s\n\n", sqlstr); } -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName) { - int counter = 0; - int count_temp = 0; +static void 
taosDumpCreateMTableClause(STableDef *tableDes, char *metric, + int numOfCols, FILE *fp, char* dbName) { + int counter = 0; + int count_temp = 0; - char* tmpBuf = (char *)malloc(COMMAND_SIZE); - if (tmpBuf == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } + char* tmpBuf = (char *)malloc(COMMAND_SIZE); + if (tmpBuf == NULL) { + errorPrint("%s() LN%d, failed to allocate %d memory\n", + __func__, __LINE__, COMMAND_SIZE); + return; + } - char *pstr = NULL; - pstr = tmpBuf; + char *pstr = NULL; + pstr = tmpBuf; - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", - dbName, tableDes->name, dbName, metric); + pstr += sprintf(tmpBuf, + "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", + dbName, tableDes->name, dbName, metric); - for (; counter < numOfCols; counter++) { - if (tableDes->cols[counter].note[0] != '\0') break; - } + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; + } - assert(counter < numOfCols); - count_temp = counter; + assert(counter < numOfCols); + count_temp = counter; + + for (; counter < numOfCols; counter++) { + if (counter != count_temp) { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); + } + } else { + if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || + strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } else { + pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); + } + /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */ + } - for (; counter < numOfCols; counter++) { - if (counter != count_temp) { - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { - //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); - pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); - } else { - pstr += sprintf(pstr, ", %s", tableDes->cols[counter].note); - } - } else { - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { - //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); - pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); - } else { - pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); - } - /* pstr += sprintf(pstr, "%s", tableDes->cols[counter].note); */ + /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") + * == 0) { */ + /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */ + /* } */ } - /* if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || strcasecmp(tableDes->cols[counter].type, "nchar") - * == 0) { */ - /* pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); */ - /* } */ - } - - pstr += sprintf(pstr, ");"); + pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n", tmpBuf); - free(tmpBuf); + fprintf(fp, "%s\n", tmpBuf); + free(tmpBuf); } -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName) { - int64_t lastRowsPrint = 5000000; - int64_t totalRows = 0; - int count = 0; - char *pstr = NULL; - TAOS_ROW row = 
NULL; - int numFields = 0; +static int writeSchemaToAvro(char *jsonAvroSchema) +{ + errorPrint("%s() LN%d, TODO: implement write schema to avro", + __func__, __LINE__); + return 0; +} - if (arguments->schemaonly) { +static int64_t writeResultToAvro(TAOS_RES *res) +{ + errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__); return 0; - } +} - int32_t sql_buf_len = arguments->max_sql_len; - char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); - if (tmpBuffer == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } +static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName) +{ + int64_t totalRows = 0; + + int32_t sql_buf_len = g_args.max_sql_len; + char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); + if (tmpBuffer == NULL) { + errorPrint("failed to allocate %d memory\n", sql_buf_len + 128); + return -1; + } - pstr = tmpBuffer; + char *pstr = tmpBuffer; - char sqlstr[1024] = {0}; - sprintf(sqlstr, - "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", - dbName, tbname, arguments->start_time, arguments->end_time); + TAOS_ROW row = NULL; + int numFields = 0; + int rowFlag = 0; + int64_t lastRowsPrint = 5000000; + int count = 0; - TAOS_RES* tmpResult = taos_query(taosCon, sqlstr); - int32_t code = taos_errno(tmpResult); - if (code != 0) { - fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult)); - free(tmpBuffer); - taos_free_result(tmpResult); - return -1; - } + numFields = taos_field_count(res); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(res); - numFields = taos_field_count(tmpResult); - assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + int32_t curr_sqlstr_len = 0; + int32_t total_sqlstr_len = 0; - int rowFlag = 0; - int32_t curr_sqlstr_len = 0; - int32_t total_sqlstr_len = 0; - count = 0; - while ((row = taos_fetch_row(tmpResult)) != NULL) { - pstr = tmpBuffer; - curr_sqlstr_len = 0; + while ((row = taos_fetch_row(res)) != NULL) { + curr_sqlstr_len = 0; - int32_t* length = taos_fetch_lengths(tmpResult); // act len + int32_t* length = taos_fetch_lengths(res); // act len - if (count == 0) { - total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s.%s VALUES (", dbName, tbname); - } else { - if (arguments->mysqlFlag) { - if (0 == rowFlag) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - rowFlag++; + if (count == 0) { + total_sqlstr_len = 0; + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "INSERT INTO %s.%s VALUES (", dbName, tbName); } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); + if (g_args.mysqlFlag) { + if (0 == rowFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + rowFlag++; + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); + } + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + } } - } else { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); - } - } - for (int col = 0; col < numFields; col++) { - if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); + for (int col = 0; col < numFields; col++) { + if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); - if (row[col] == NULL) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); - continue; - } + if (row[col] == NULL) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); + continue; + } - switch (fields[col].type) { - case TSDB_DATA_TYPE_BOOL: - 
curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col])); - break; - case TSDB_DATA_TYPE_SMALLINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col])); - break; - case TSDB_DATA_TYPE_INT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col])); - break; - case TSDB_DATA_TYPE_BIGINT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *((int64_t *)row[col])); - break; - case TSDB_DATA_TYPE_FLOAT: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col])); - break; - case TSDB_DATA_TYPE_DOUBLE: - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); - break; - case TSDB_DATA_TYPE_BINARY: { - char tbuf[COMMAND_SIZE] = {0}; - //*(pstr++) = '\''; - converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - //pstr = stpcpy(pstr, tbuf); - //*(pstr++) = '\''; - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); - break; + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0)); + break; + case TSDB_DATA_TYPE_TINYINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col])); + break; + case TSDB_DATA_TYPE_SMALLINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col])); + break; + case TSDB_DATA_TYPE_INT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col])); + break; + case TSDB_DATA_TYPE_BIGINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", + *((int64_t *)row[col])); + break; + case TSDB_DATA_TYPE_FLOAT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col])); + break; + case TSDB_DATA_TYPE_DOUBLE: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); + break; + case TSDB_DATA_TYPE_BINARY: + { + char tbuf[COMMAND_SIZE] = {0}; + //*(pstr++) = '\''; + converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); + //pstr = stpcpy(pstr, tbuf); + //*(pstr++) = '\''; + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_NCHAR: + { + char tbuf[COMMAND_SIZE] = {0}; + convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: + if (!g_args.mysqlFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", + *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", + buf, (int)(ts % 1000)); + } + break; + default: + break; + } } - case TSDB_DATA_TYPE_NCHAR: { - char tbuf[COMMAND_SIZE] = {0}; - convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); - pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); - break; + + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); + + totalRows++; + count++; + fprintf(fp, "%s", tmpBuffer); + + if (totalRows >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from %s.%s\n", + totalRows, dbName, tbName); + 
lastRowsPrint += 5000000; + } + + total_sqlstr_len += curr_sqlstr_len; + + if ((count >= g_args.data_batch) + || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { + fprintf(fp, ";\n"); + count = 0; } - case TSDB_DATA_TYPE_TIMESTAMP: - if (!arguments->mysqlFlag) { - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]); - } else { - char buf[64] = "\0"; - int64_t ts = *((int64_t *)row[col]); - time_t tt = (time_t)(ts / 1000); - struct tm *ptm = localtime(&tt); - strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'", buf, (int)(ts % 1000)); - } - break; - default: - break; - } } - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") "); + debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); - totalRows++; - count++; - fprintf(fp, "%s", tmpBuffer); + fprintf(fp, "\n"); + atomic_add_fetch_64(&g_totalDumpOutRows, totalRows); + free(tmpBuffer); - if (totalRows >= lastRowsPrint) { - printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname); - lastRowsPrint += 5000000; - } + return 0; +} - total_sqlstr_len += curr_sqlstr_len; +static int taosDumpTableData(FILE *fp, char *tbName, + TAOS* taosCon, char* dbName, + char *jsonAvroSchema) { + int64_t totalRows = 0; - if ((count >= arguments->data_batch) || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { - fprintf(fp, ";\n"); - count = 0; - } //else { - //fprintf(fp, "\\\n"); - //} - } + char sqlstr[1024] = {0}; + sprintf(sqlstr, + "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", + dbName, tbName, g_args.start_time, g_args.end_time); + + TAOS_RES* res = taos_query(taosCon, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("failed to run command %s, reason: %s\n", + sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - fprintf(fp, "\n"); - atomic_add_fetch_64(&totalDumpOutRows, totalRows); + if (g_args.avro) { + writeSchemaToAvro(jsonAvroSchema); + totalRows = writeResultToAvro(res); + } else { + totalRows = writeResultToSql(res, fp, dbName, tbName); + } - taos_free_result(tmpResult); - free(tmpBuffer); - return totalRows; + taos_free_result(res); + return totalRows; } -int taosCheckParam(struct arguments *arguments) { - if (arguments->all_databases && arguments->databases) { - fprintf(stderr, "conflict option --all-databases and --databases\n"); - return -1; - } +static int taosCheckParam(struct arguments *arguments) { + if (g_args.all_databases && g_args.databases) { + fprintf(stderr, "conflict option --all-databases and --databases\n"); + return -1; + } - if (arguments->start_time > arguments->end_time) { - fprintf(stderr, "start time is larger than end time\n"); - return -1; - } + if (g_args.start_time > g_args.end_time) { + fprintf(stderr, "start time is larger than end time\n"); + return -1; + } - if (arguments->arg_list_len == 0) { - if ((!arguments->all_databases) && (!arguments->isDumpIn)) { - fprintf(stderr, "taosdump requires parameters\n"); - return -1; + if (g_args.arg_list_len == 0) { + if ((!g_args.all_databases) && (!g_args.isDumpIn)) { + errorPrint("%s", "taosdump requires parameters for database and operation\n"); + return -1; + } + } + /* + if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } + */ + if (!g_args.isDumpIn && g_args.encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); + 
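/*
 * Editorial note (not part of the patch): a minimal, self-contained sketch of
 * the flush rule writeResultToSql() applies above -- rows are appended to a
 * single INSERT statement until either the configured batch size is reached or
 * the SQL buffer may no longer hold another worst-case row.  The constant below
 * is a stand-in for TSDB_MAX_BYTES_PER_ROW, not the engine's real value.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MAX_BYTES_PER_ROW  (64 * 1024)      /* illustrative stand-in */

static bool shouldFlushBatch(int rowsInBatch, int dataBatch,
                             int32_t sqlBufLen, int32_t usedLen) {
    if (rowsInBatch >= dataBatch) {
        return true;                               /* batch size reached */
    }
    if (sqlBufLen - usedLen < SKETCH_MAX_BYTES_PER_ROW) {
        return true;                               /* no room for another worst-case row */
    }
    return false;
}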
return -1; } - } -/* - if (arguments->isDumpIn && (strcmp(arguments->outpath, DEFAULT_DUMP_FILE) != 0)) { - fprintf(stderr, "duplicate parameter input and output file path\n"); - return -1; - } -*/ - if (!arguments->isDumpIn && arguments->encode != NULL) { - fprintf(stderr, "invalid option in dump out\n"); - return -1; - } - if (arguments->table_batch <= 0) { - fprintf(stderr, "invalid option in dump out\n"); - return -1; - } + if (g_args.table_batch <= 0) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } - return 0; + return 0; } -bool isEmptyCommand(char *cmd) { +/* +static bool isEmptyCommand(char *cmd) { char *pchar = cmd; while (*pchar != '\0') { @@ -1960,8 +2208,8 @@ bool isEmptyCommand(char *cmd) { return true; } -void taosReplaceCtrlChar(char *str) { - _Bool ctrlOn = false; +static void taosReplaceCtrlChar(char *str) { + bool ctrlOn = false; char *pstr = NULL; for (pstr = str; *str != '\0'; ++str) { @@ -2003,6 +2251,7 @@ void taosReplaceCtrlChar(char *str) { *pstr = '\0'; } +*/ char *ascii_literal_list[] = { "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", @@ -2026,374 +2275,423 @@ char *ascii_literal_list[] = { "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; -int converStringToReadable(char *str, int size, char *buf, int bufsize) { - char *pstr = str; - char *pbuf = buf; - while (size > 0) { - if (*pstr == '\0') break; - pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); - pstr++; - size--; - } - *pbuf = '\0'; - return 0; +static int converStringToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + while (size > 0) { + if (*pstr == '\0') break; + pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); + pstr++; + size--; + } + *pbuf = '\0'; + return 0; } -int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { - char *pstr = str; - char *pbuf = buf; - // TODO - wchar_t wc; - while (size > 0) { - if (*pstr == '\0') break; - int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); - if (byte_width < 0) { - fprintf(stderr, "mbtowc() return fail.\n"); - exit(-1); - } - - if ((int)wc < 256) { - pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); - } else { - memcpy(pbuf, pstr, byte_width); - pbuf += byte_width; +static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + // TODO + wchar_t wc; + while (size > 0) { + if (*pstr == '\0') break; + int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); + if (byte_width < 0) { + errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__); + exit(-1); + } + + if ((int)wc < 256) { + pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); + } else { + memcpy(pbuf, pstr, byte_width); + pbuf += byte_width; + } + pstr += byte_width; } - pstr += byte_width; - } - *pbuf = '\0'; + *pbuf = '\0'; - return 0; + return 0; } -void taosDumpCharset(FILE *fp) { - char charsetline[256]; +static void taosDumpCharset(FILE *fp) { + char charsetline[256]; - (void)fseek(fp, 0, SEEK_SET); - sprintf(charsetline, "#!%s\n", tsCharset); - (void)fwrite(charsetline, strlen(charsetline), 1, fp); + (void)fseek(fp, 0, SEEK_SET); + sprintf(charsetline, "#!%s\n", tsCharset); + (void)fwrite(charsetline, strlen(charsetline), 1, fp); } -void taosLoadFileCharset(FILE *fp, char *fcharset) { - char * line = NULL; - size_t 
line_size = 0; +static void taosLoadFileCharset(FILE *fp, char *fcharset) { + char * line = NULL; + size_t line_size = 0; - (void)fseek(fp, 0, SEEK_SET); - ssize_t size = getline(&line, &line_size, fp); - if (size <= 2) { - goto _exit_no_charset; - } + (void)fseek(fp, 0, SEEK_SET); + ssize_t size = getline(&line, &line_size, fp); + if (size <= 2) { + goto _exit_no_charset; + } - if (strncmp(line, "#!", 2) != 0) { - goto _exit_no_charset; - } - if (line[size - 1] == '\n') { - line[size - 1] = '\0'; - size--; - } - strcpy(fcharset, line + 2); + if (strncmp(line, "#!", 2) != 0) { + goto _exit_no_charset; + } + if (line[size - 1] == '\n') { + line[size - 1] = '\0'; + size--; + } + strcpy(fcharset, line + 2); - tfree(line); - return; + tfree(line); + return; _exit_no_charset: - (void)fseek(fp, 0, SEEK_SET); - *fcharset = '\0'; - tfree(line); - return; + (void)fseek(fp, 0, SEEK_SET); + *fcharset = '\0'; + tfree(line); + return; } // ======== dumpIn support multi threads functions ================================// -static char **tsDumpInSqlFiles = NULL; -static int32_t tsSqlFileNum = 0; -static char tsDbSqlFile[TSDB_FILENAME_LEN] = {0}; -static char tsfCharset[64] = {0}; -static int taosGetFilesNum(const char *directoryName, const char *prefix) +static char **g_tsDumpInSqlFiles = NULL; +static int32_t g_tsSqlFileNum = 0; +static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0}; +static char g_tsCharset[64] = {0}; + +static int taosGetFilesNum(const char *directoryName, + const char *prefix, const char *prefix2) { - char cmd[1024] = { 0 }; - sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); + char cmd[1024] = { 0 }; - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(0); - } + if (prefix2) + sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ", + directoryName, prefix, directoryName, prefix2); + else + sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix); - int fileNum = 0; - if (fscanf(fp, "%d", &fileNum) != 1) { - fprintf(stderr, "ERROR: failed to execute:%s, parse result error\n", cmd); - exit(0); - } + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(-1); + } - if (fileNum <= 0) { - fprintf(stderr, "ERROR: directory:%s is empry\n", directoryName); - exit(0); - } + int fileNum = 0; + if (fscanf(fp, "%d", &fileNum) != 1) { + errorPrint("failed to execute:%s, parse result error\n", cmd); + exit(-1); + } + + if (fileNum <= 0) { + errorPrint("directory:%s is empry\n", directoryName); + exit(-1); + } - pclose(fp); - return fileNum; + pclose(fp); + return fileNum; } -static void taosParseDirectory(const char *directoryName, const char *prefix, char **fileArray, int totalFiles) +static void taosParseDirectory(const char *directoryName, + const char *prefix, const char *prefix2, + char **fileArray, int totalFiles) { - char cmd[1024] = { 0 }; - sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + char cmd[1024] = { 0 }; - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(0); - } + if (prefix2) { + sprintf(cmd, "ls %s/*.%s %s/*.%s | sort", + directoryName, prefix, directoryName, prefix2); + } else { + sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix); + } - int fileNum = 0; - while (fscanf(fp, "%128s", fileArray[fileNum++])) { - if (strcmp(fileArray[fileNum-1], tsDbSqlFile) == 0) { - fileNum--; + FILE *fp = popen(cmd, "r"); + if (fp == 
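/*
 * Editorial note (not part of the patch): taosGetFilesNum() above counts dump
 * files by shelling out to `ls ... | wc -l` through popen(), which fails on
 * directory names containing spaces or shell metacharacters.  A sketch of the
 * same count done with opendir()/readdir(); both helper names are hypothetical
 * and only the file extension is matched.
 */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

static int hasSuffix(const char *name, const char *suffix) {
    size_t n = strlen(name), s = strlen(suffix);
    return (n > s) && (strcmp(name + n - s, suffix) == 0);
}

static int countDumpFiles(const char *dirName, const char *suffix) {
    DIR *dir = opendir(dirName);
    if (dir == NULL) {
        perror("opendir");
        return -1;
    }
    int count = 0;
    struct dirent *ent;
    while ((ent = readdir(dir)) != NULL) {
        if (hasSuffix(ent->d_name, suffix)) {
            count++;                               /* e.g. every "*.sql" file */
        }
    }
    closedir(dir);
    return count;
}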
NULL) { + errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(-1); } - if (fileNum >= totalFiles) { - break; + + int fileNum = 0; + while (fscanf(fp, "%128s", fileArray[fileNum++])) { + if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) { + fileNum--; + } + if (fileNum >= totalFiles) { + break; + } } - } - if (fileNum != totalFiles) { - fprintf(stderr, "ERROR: directory:%s changed while read\n", directoryName); - pclose(fp); - exit(0); - } + if (fileNum != totalFiles) { + errorPrint("directory:%s changed while read\n", directoryName); + pclose(fp); + exit(-1); + } - pclose(fp); + pclose(fp); } -static void taosCheckTablesSQLFile(const char *directoryName) +static void taosCheckDatabasesSQLFile(const char *directoryName) { - char cmd[1024] = { 0 }; - sprintf(cmd, "ls %s/dbs.sql", directoryName); + char cmd[1024] = { 0 }; + sprintf(cmd, "ls %s/dbs.sql", directoryName); - FILE *fp = popen(cmd, "r"); - if (fp == NULL) { - fprintf(stderr, "ERROR: failed to execute:%s, error:%s\n", cmd, strerror(errno)); - exit(0); - } + FILE *fp = popen(cmd, "r"); + if (fp == NULL) { + errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno)); + exit(-1); + } - while (fscanf(fp, "%128s", tsDbSqlFile)) { - break; - } + while (fscanf(fp, "%128s", g_tsDbSqlFile)) { + break; + } - pclose(fp); + pclose(fp); } -static void taosMallocSQLFiles() +static void taosMallocDumpFiles() { - tsDumpInSqlFiles = (char**)calloc(tsSqlFileNum, sizeof(char*)); - for (int i = 0; i < tsSqlFileNum; i++) { - tsDumpInSqlFiles[i] = calloc(1, TSDB_FILENAME_LEN); - } + g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*)); + for (int i = 0; i < g_tsSqlFileNum; i++) { + g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN); + } } -static void taosFreeSQLFiles() +static void taosFreeDumpFiles() { - for (int i = 0; i < tsSqlFileNum; i++) { - tfree(tsDumpInSqlFiles[i]); - } - tfree(tsDumpInSqlFiles); + for (int i = 0; i < g_tsSqlFileNum; i++) { + tfree(g_tsDumpInSqlFiles[i]); + } + tfree(g_tsDumpInSqlFiles); } static void taosGetDirectoryFileList(char *inputDir) { - struct stat fileStat; - if (stat(inputDir, &fileStat) < 0) { - fprintf(stderr, "ERROR: %s not exist\n", inputDir); - exit(0); - } - - if (fileStat.st_mode & S_IFDIR) { - taosCheckTablesSQLFile(inputDir); - tsSqlFileNum = taosGetFilesNum(inputDir, "sql"); - int tsSqlFileNumOfTbls = tsSqlFileNum; - if (tsDbSqlFile[0] != 0) { - tsSqlFileNumOfTbls--; + struct stat fileStat; + if (stat(inputDir, &fileStat) < 0) { + errorPrint("%s not exist\n", inputDir); + exit(-1); } - taosMallocSQLFiles(); - if (0 != tsSqlFileNumOfTbls) { - taosParseDirectory(inputDir, "sql", tsDumpInSqlFiles, tsSqlFileNumOfTbls); + + if (fileStat.st_mode & S_IFDIR) { + taosCheckDatabasesSQLFile(inputDir); + if (g_args.avro) + g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro"); + else + g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL); + + int tsSqlFileNumOfTbls = g_tsSqlFileNum; + if (g_tsDbSqlFile[0] != 0) { + tsSqlFileNumOfTbls--; + } + taosMallocDumpFiles(); + if (0 != tsSqlFileNumOfTbls) { + if (g_args.avro) { + taosParseDirectory(inputDir, "sql", "avro", + g_tsDumpInSqlFiles, tsSqlFileNumOfTbls); + } else { + taosParseDirectory(inputDir, "sql", NULL, + g_tsDumpInSqlFiles, tsSqlFileNumOfTbls); + } + } + fprintf(stdout, "\nstart to dispose %d files in %s\n", + g_tsSqlFileNum, inputDir); + } else { + errorPrint("%s is not a directory\n", inputDir); + exit(-1); } - fprintf(stdout, "\nstart to dispose %d files in %s\n", tsSqlFileNum, inputDir); - } - 
else { - fprintf(stderr, "ERROR: %s is not a directory\n", inputDir); - exit(0); - } } -static FILE* taosOpenDumpInFile(char *fptr) { - wordexp_t full_path; - - if (wordexp(fptr, &full_path, 0) != 0) { - fprintf(stderr, "ERROR: illegal file name: %s\n", fptr); - return NULL; - } +static FILE* taosOpenDumpInFile(char *fptr) { + wordexp_t full_path; - char *fname = full_path.we_wordv[0]; + if (wordexp(fptr, &full_path, 0) != 0) { + errorPrint("illegal file name: %s\n", fptr); + return NULL; + } - FILE *f = fopen(fname, "r"); - if (f == NULL) { - fprintf(stderr, "ERROR: failed to open file %s\n", fname); - wordfree(&full_path); - return NULL; - } + char *fname = full_path.we_wordv[0]; - wordfree(&full_path); + FILE *f = NULL; + if ((fname) && (strlen(fname) > 0)) { + f = fopen(fname, "r"); + if (f == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, fname); + } + } - return f; + wordfree(&full_path); + return f; } -int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { - int read_len = 0; - char * cmd = NULL; - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; - - cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN); - if (cmd == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } +static int taosDumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, + char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; + + cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN); + if (cmd == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", + __func__, __LINE__); + return -1; + } - int lastRowsPrint = 5000000; - int lineNo = 0; - while ((read_len = getline(&line, &line_len, fp)) != -1) { - ++lineNo; - if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; - line[--read_len] = '\0'; + int lastRowsPrint = 5000000; + int lineNo = 0; + while ((read_len = getline(&line, &line_len, fp)) != -1) { + ++lineNo; + if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; + line[--read_len] = '\0'; - //if (read_len == 0 || isCommentLine(line)) { // line starts with # - if (read_len == 0 ) { - continue; - } + //if (read_len == 0 || isCommentLine(line)) { // line starts with # + if (read_len == 0 ) { + continue; + } - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } + if (line[read_len - 1] == '\\') { + line[read_len - 1] = ' '; + memcpy(cmd + cmd_len, line, read_len); + cmd_len += read_len; + continue; + } - memcpy(cmd + cmd_len, line, read_len); - cmd[read_len + cmd_len]= '\0'; - if (queryDbImpl(taos, cmd)) { - fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); - fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); - } + memcpy(cmd + cmd_len, line, read_len); + cmd[read_len + cmd_len]= '\0'; + if (queryDbImpl(taos, cmd)) { + errorPrint("%s() LN%d, error sql: linenu:%d, file:%s\n", + __func__, __LINE__, lineNo, fileName); + fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + } - memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); - cmd_len = 0; + memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); + cmd_len = 0; - if (lineNo >= lastRowsPrint) { - printf(" %d lines already be executed from file %s\n", lineNo, fileName); - lastRowsPrint += 5000000; + if (lineNo >= lastRowsPrint) { + printf(" %d lines already be executed from file %s\n", lineNo, fileName); + lastRowsPrint += 5000000; + } } - } - tfree(cmd); 
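/*
 * Editorial note (not part of the patch): taosDumpInOneFile() above only checks
 * the length of each individual line against TSDB_MAX_ALLOWED_SQL_LEN; when a
 * statement is continued with a trailing '\' the accumulated cmd_len is never
 * bounded, so a long multi-line statement could overflow the cmd buffer.  A
 * self-contained sketch of the missing guard; the helper name is hypothetical.
 */
#include <stdio.h>
#include <string.h>

/* Appends one line to the statement buffer; returns -1 (and resets the
 * statement) if the result would no longer fit. */
static int appendSqlLine(char *cmd, size_t *cmdLen, size_t cmdCap,
                         const char *line, size_t lineLen) {
    if (*cmdLen + lineLen + 1 > cmdCap) {
        fprintf(stderr, "statement longer than %zu bytes, dropped\n", cmdCap);
        *cmdLen = 0;                               /* discard the oversized statement */
        return -1;
    }
    memcpy(cmd + *cmdLen, line, lineLen);
    *cmdLen += lineLen;
    cmd[*cmdLen] = '\0';
    return 0;
}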
- tfree(line); - fclose(fp); - return 0; + tfree(cmd); + tfree(line); + fclose(fp); + return 0; } -void* taosDumpInWorkThreadFp(void *arg) +static void* taosDumpInWorkThreadFp(void *arg) { - SThreadParaObj *pThread = (SThreadParaObj*)arg; - for (int32_t f = 0; f < tsSqlFileNum; ++f) { - if (f % pThread->totalThreads == pThread->threadIndex) { - char *SQLFileName = tsDumpInSqlFiles[f]; - FILE* fp = taosOpenDumpInFile(SQLFileName); - if (NULL == fp) { - continue; - } - fprintf(stderr, "Success Open input file: %s\n", SQLFileName); - taosDumpInOneFile(pThread->taosCon, fp, tsfCharset, g_args.encode, SQLFileName); + SThreadParaObj *pThread = (SThreadParaObj*)arg; + setThreadName("dumpInWorkThrd"); + + for (int32_t f = 0; f < g_tsSqlFileNum; ++f) { + if (f % pThread->totalThreads == pThread->threadIndex) { + char *SQLFileName = g_tsDumpInSqlFiles[f]; + FILE* fp = taosOpenDumpInFile(SQLFileName); + if (NULL == fp) { + continue; + } + fprintf(stderr, ", Success Open input file: %s\n", + SQLFileName); + taosDumpInOneFile(pThread->taosCon, fp, g_tsCharset, g_args.encode, SQLFileName); + } } - } - return NULL; + return NULL; } -static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args) +static void taosStartDumpInWorkThreads() { - pthread_attr_t thattr; - SThreadParaObj *pThread; - int32_t totalThreads = args->thread_num; + pthread_attr_t thattr; + SThreadParaObj *pThread; + int32_t totalThreads = g_args.thread_num; - if (totalThreads > tsSqlFileNum) { - totalThreads = tsSqlFileNum; - } + if (totalThreads > g_tsSqlFileNum) { + totalThreads = g_tsSqlFileNum; + } - SThreadParaObj *threadObj = (SThreadParaObj *)calloc(totalThreads, sizeof(SThreadParaObj)); - for (int32_t t = 0; t < totalThreads; ++t) { - pThread = threadObj + t; - pThread->threadIndex = t; - pThread->totalThreads = totalThreads; - pThread->taosCon = taosCon; + SThreadParaObj *threadObj = (SThreadParaObj *)calloc( + totalThreads, sizeof(SThreadParaObj)); - pthread_attr_init(&thattr); - pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (NULL == threadObj) { + errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); + } - if (pthread_create(&(pThread->threadID), &thattr, taosDumpInWorkThreadFp, (void*)pThread) != 0) { - fprintf(stderr, "ERROR: thread:%d failed to start\n", pThread->threadIndex); - exit(0); + for (int32_t t = 0; t < totalThreads; ++t) { + pThread = threadObj + t; + pThread->threadIndex = t; + pThread->totalThreads = totalThreads; + pThread->taosCon = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (pThread->taosCon == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + free(threadObj); + return; + } + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&(pThread->threadID), &thattr, + taosDumpInWorkThreadFp, (void*)pThread) != 0) { + errorPrint("%s() LN%d, thread:%d failed to start\n", + __func__, __LINE__, pThread->threadIndex); + exit(0); + } } - } - for (int t = 0; t < totalThreads; ++t) { - pthread_join(threadObj[t].threadID, NULL); - } + for (int t = 0; t < totalThreads; ++t) { + pthread_join(threadObj[t].threadID, NULL); + } - for (int t = 0; t < totalThreads; ++t) { - taos_close(threadObj[t].taosCon); - } - free(threadObj); + for (int t = 0; t < totalThreads; ++t) { + taos_close(threadObj[t].taosCon); + } + free(threadObj); } +static int taosDumpIn() { + assert(g_args.isDumpIn); + + TAOS *taos = NULL; + FILE *fp = NULL; -int 
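/*
 * Editorial note (not part of the patch): in taosStartDumpInWorkThreads() above
 * a failed calloc() is only logged before threadObj is dereferenced, and a
 * taos_connect() failure part-way through the loop returns without closing the
 * connections that were already opened.  A sketch of a stricter start-up path,
 * reusing SThreadParaObj, g_args and the taos_connect()/taos_close() calls from
 * this file; the helper name is hypothetical.
 */
static int openThreadConnections(SThreadParaObj *threadObj, int32_t totalThreads) {
    if (threadObj == NULL) {
        return -1;                                 /* allocation failed: do not continue */
    }
    for (int32_t t = 0; t < totalThreads; ++t) {
        threadObj[t].taosCon = taos_connect(g_args.host, g_args.user,
                                            g_args.password, NULL, g_args.port);
        if (threadObj[t].taosCon == NULL) {
            for (int32_t c = 0; c < t; ++c) {      /* close what was already opened */
                taos_close(threadObj[c].taosCon);
            }
            return -1;
        }
    }
    return 0;
}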
taosDumpIn(struct arguments *arguments) { - assert(arguments->isDumpIn); + taos = taos_connect( + g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (taos == NULL) { + errorPrint("%s() LN%d, failed to connect to TDengine server\n", + __func__, __LINE__); + return -1; + } - TAOS *taos = NULL; - FILE *fp = NULL; + taosGetDirectoryFileList(g_args.inpath); - taos = taos_connect(arguments->host, arguments->user, arguments->password, NULL, arguments->port); - if (taos == NULL) { - fprintf(stderr, "failed to connect to TDengine server\n"); - return -1; - } + int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum; + if (g_tsDbSqlFile[0] != 0) { + tsSqlFileNumOfTbls--; - taosGetDirectoryFileList(arguments->inpath); + fp = taosOpenDumpInFile(g_tsDbSqlFile); + if (NULL == fp) { + errorPrint("%s() LN%d, failed to open input file %s\n", + __func__, __LINE__, g_tsDbSqlFile); + return -1; + } + fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile); - int32_t tsSqlFileNumOfTbls = tsSqlFileNum; - if (tsDbSqlFile[0] != 0) { - tsSqlFileNumOfTbls--; + taosLoadFileCharset(fp, g_tsCharset); - fp = taosOpenDumpInFile(tsDbSqlFile); - if (NULL == fp) { - fprintf(stderr, "failed to open input file %s\n", tsDbSqlFile); - return -1; + taosDumpInOneFile(taos, fp, g_tsCharset, g_args.encode, + g_tsDbSqlFile); } - fprintf(stderr, "Success Open input file: %s\n", tsDbSqlFile); - - taosLoadFileCharset(fp, tsfCharset); - taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); - } + taos_close(taos); - if (0 != tsSqlFileNumOfTbls) { - taosStartDumpInWorkThreads(taos, arguments); - } + if (0 != tsSqlFileNumOfTbls) { + taosStartDumpInWorkThreads(); + } - taos_close(taos); - taosFreeSQLFiles(); - return 0; + taosFreeDumpFiles(); + return 0; } - diff --git a/src/kit/taospack/CMakeLists.txt b/src/kit/taospack/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..58c36887329f0deb6839162dd966c96d09edbc0f --- /dev/null +++ b/src/kit/taospack/CMakeLists.txt @@ -0,0 +1,21 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +PROJECT(TDengine) + +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/os/inc) +INCLUDE_DIRECTORIES(inc) + +IF (TD_LINUX) + AUX_SOURCE_DIRECTORY(. SRC) + ADD_EXECUTABLE(taospack ${SRC}) + TARGET_LINK_LIBRARIES(taospack os tutil tsdb ${VAR_TSZ}) +ELSEIF (TD_WINDOWS) + AUX_SOURCE_DIRECTORY(. SRC) + ADD_EXECUTABLE(taospack ${SRC}) + TARGET_LINK_LIBRARIES(taospack) +ELSEIF (TD_DARWIN) + # MAC + AUX_SOURCE_DIRECTORY(. SRC) + ADD_EXECUTABLE(taospack ${SRC}) + TARGET_LINK_LIBRARIES(taospack os tutil tsdb) +ENDIF () \ No newline at end of file diff --git a/src/kit/taospack/taospack.c b/src/kit/taospack/taospack.c new file mode 100644 index 0000000000000000000000000000000000000000..33d779dfcf7abd1315107fd45f79eaed57581768 --- /dev/null +++ b/src/kit/taospack/taospack.c @@ -0,0 +1,766 @@ + + +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#if defined(WINDOWS) +int main(int argc, char *argv[]) { + printf("welcome to use taospack tools v1.3 for windows.\n"); +} +#elif !defined(TD_TSZ) +int main(int argc, char *argv[]) { + printf(" welcome taospack. \n You not open TSZ , please define TD_TSZ to open TSZ algo.\n"); +} +#else + +#include "os.h" +#include "tscompression.h" +#include "tdataformat.h" + + + +// ------- define ----------- +#define FT_CNT 8 + +// ------- function declare -------- +void cost_start(); +double cost_end(const char* tag); +int notsame_cnt = 0; + +// ------- global declare ----------- +float g_ft1[ ] = {\ + 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 
3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.9010000228881836, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 
3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 
3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 
3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 
3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.630000114440918, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 
3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 
3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 
3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7079999446868896, 3.7079999446868896, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 
3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7119998931884766, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7100000381469727, 3.7079999446868896, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7070000171661377, 3.7100000381469727, 3.7119998931884766, 3.7119998931884766, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7139999866485596, 3.7109999656677246, 3.7109999656677246, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7130000591278076, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7119998931884766, 3.7130000591278076, 3.7109999656677246, 3.7079999446868896, 3.7090001106262207, 3.7090001106262207, 3.7119998931884766, 3.7109999656677246, 3.7109999656677246, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7090001106262207, 3.7109999656677246, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7079999446868896, 3.7079999446868896, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7090001106262207, 3.7100000381469727, 3.7090001106262207, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7109999656677246, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7149999141693115, 3.7079999446868896, 3.7070000171661377, 3.7049999237060547, 3.7060000896453857, 3.7060000896453857\ + ,3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 
3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.634000062942505, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422 + ,3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 
3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.9010000228881836, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 
3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 
3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 
3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 
3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 
3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.630000114440918, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 
3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 
3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 
3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7079999446868896, 3.7079999446868896, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7119998931884766, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7100000381469727, 3.7079999446868896, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7070000171661377, 3.7100000381469727, 3.7119998931884766, 3.7119998931884766, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7139999866485596, 3.7109999656677246, 3.7109999656677246, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7130000591278076, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7119998931884766, 3.7130000591278076, 
3.7109999656677246, 3.7079999446868896, 3.7090001106262207, 3.7090001106262207, 3.7119998931884766, 3.7109999656677246, 3.7109999656677246, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7090001106262207, 3.7109999656677246, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7079999446868896, 3.7079999446868896, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7090001106262207, 3.7100000381469727, 3.7090001106262207, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7109999656677246, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7149999141693115, 3.7079999446868896, 3.7070000171661377, 3.7049999237060547, 3.7060000896453857, 3.7060000896453857\ + ,3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 
3.63100004196167, 3.631999969482422, 3.632999897003174, 3.634000062942505, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422 + }; + +double g_de1[ ] = {\ + 3.895999908447265612345, 3.897000074386596701234, 3.89800000190734860123456789, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 
3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.9010000228881836, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 
3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 
3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 
3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 
3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 
3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.630000114440918, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 
3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 
3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 
3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7079999446868896, 3.7079999446868896, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7119998931884766, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7100000381469727, 3.7079999446868896, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7070000171661377, 3.7100000381469727, 3.7119998931884766, 3.7119998931884766, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7139999866485596, 3.7109999656677246, 3.7109999656677246, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7130000591278076, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7119998931884766, 3.7130000591278076, 3.7109999656677246, 3.7079999446868896, 3.7090001106262207, 3.7090001106262207, 3.7119998931884766, 3.7109999656677246, 3.7109999656677246, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7090001106262207, 3.7109999656677246, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7079999446868896, 3.7079999446868896, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 
3.7090001106262207, 3.7100000381469727, 3.7090001106262207, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7109999656677246, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7149999141693115, 3.7079999446868896, 3.7070000171661377, 3.7049999237060547, 3.7060000896453857, 3.7060000896453857\ + ,3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.634000062942505, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 
3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422 + ,3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8980000019073486, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8980000019073486, 3.8949999809265137, 3.8959999084472656, 3.8980000019073486, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8959999084472656, 3.8949999809265137, 
3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.9010000228881836, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8980000019073486, 3.8970000743865967, 3.8989999294281006, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 
3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 
3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8949999809265137, 3.8970000743865967, 3.8980000019073486, 3.8980000019073486, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 
3.8970000743865967, 3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8959999084472656, 3.8970000743865967, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.9000000953674316, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8959999084472656, 3.8980000019073486, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 
3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 
3.63100004196167, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.630000114440918, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 
3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.8940000534057617, 3.8959999084472656, 3.8970000743865967, 3.8970000743865967, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 
3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8929998874664307, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8959999084472656, 3.8929998874664307, 3.8949999809265137, 3.8929998874664307, 3.8959999084472656, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8940000534057617, 3.8970000743865967, 3.8940000534057617, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8949999809265137, 3.8940000534057617, 3.8940000534057617, 3.8959999084472656, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8959999084472656, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787, 3.8929998874664307, 3.8940000534057617, 3.8929998874664307, 3.8940000534057617, 3.8949999809265137, 3.8929998874664307, 3.8929998874664307, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8929998874664307, 3.8940000534057617, 3.8989999294281006, 3.8929998874664307, 3.8929998874664307, 3.8929998874664307, 3.8919999599456787, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8949999809265137, 3.8970000743865967, 3.8940000534057617, 3.8940000534057617, 3.8940000534057617, 3.8929998874664307, 3.8919999599456787\ + ,3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 
3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7079999446868896, 3.7079999446868896, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7119998931884766, 3.7090001106262207, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7100000381469727, 3.7079999446868896, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7070000171661377, 3.7100000381469727, 3.7119998931884766, 3.7119998931884766, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7139999866485596, 3.7109999656677246, 3.7109999656677246, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7130000591278076, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7119998931884766, 3.7130000591278076, 3.7109999656677246, 3.7079999446868896, 3.7090001106262207, 3.7090001106262207, 3.7119998931884766, 3.7109999656677246, 3.7109999656677246, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7090001106262207, 3.7109999656677246, 3.7119998931884766, 3.7109999656677246, 3.7090001106262207, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7109999656677246, 3.7119998931884766, 3.7079999446868896, 3.7079999446868896, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7100000381469727, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 3.7100000381469727, 3.7070000171661377, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7090001106262207, 3.7100000381469727, 3.7090001106262207, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7079999446868896, 3.7090001106262207, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7070000171661377, 3.7090001106262207, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7070000171661377, 3.7060000896453857, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7060000896453857, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7060000896453857, 3.7060000896453857, 3.7109999656677246, 3.7070000171661377, 3.7060000896453857, 3.7060000896453857, 3.7049999237060547, 3.7070000171661377, 3.7100000381469727, 3.7079999446868896, 3.7070000171661377, 3.7070000171661377, 3.7079999446868896, 3.7079999446868896, 
3.7149999141693115, 3.7079999446868896, 3.7070000171661377, 3.7049999237060547, 3.7060000896453857, 3.7060000896453857\ + ,3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.634000062942505, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.63100004196167, 3.63100004196167, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.63100004196167, 3.63100004196167, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.63100004196167, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 
3.631999969482422, 3.631999969482422, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.631999969482422, 3.632999897003174, 3.631999969482422, 3.632999897003174, 3.631999969482422 + }; + +/* +struct timeval startTime; +struct timeval endTime; +struct timeval costStart; +double totalCost = 0; + +void cost_start() +{ + totalCost = 0; + gettimeofday(&costStart, NULL); +} + +double cost_end(const char* tag) +{ + double elapsed; + struct timeval costEnd; + gettimeofday(&costEnd, NULL); + elapsed = ((costEnd.tv_sec*1000000+costEnd.tv_usec)-(costStart.tv_sec*1000000+costStart.tv_usec))/1000000.0; + totalCost += elapsed; + double use_ms = totalCost*1000; + printf(" timecost %s : %.3f ms\n", tag, use_ms); + return use_ms; +} +*/ + +float toFloat(char* buf){ + return (float)atoll(buf); +} + +// +// read float +// +float* read_float(const char* inFile, int* pcount){ + // check valid + if(inFile == NULL || inFile[0] == 0 ){ + printf(" inFile is NULL or EMPTY.\n"); + return NULL; + } + + // read in file + FILE* pfin = fopen(inFile, "r"); + if(pfin == NULL){ + printf(" open IN file %s error. errno=%d\n", inFile, errno); + return false; + } + + // read one line from infile and write to outfile + char buf[256]={0}; + int malloc_cnt = 100000; + float* floats = malloc(malloc_cnt*sizeof(float)); + int fi = 0; + while(fgets(buf, sizeof(buf), pfin) != NULL) { + // get item + if(buf[0] == 0 || strcmp(buf, " ") == 0) + continue; + floats[fi] = atof(buf); + //printf(" buff=%s float=%.50f \n ", buf, floats[fi]); + if ( ++fi == malloc_cnt ) { + malloc_cnt += 100000; + floats = realloc(floats, malloc_cnt*sizeof(float)); + } + memset(buf, 0, sizeof(buf)); + } + + // close + fclose(pfin); + if(pcount) + *pcount = fi; + return floats; +} + +float check_same(float* ft1, float* ft2, int count){ + int same_cnt =0; + for(int i=0; i< count; i++){ + + if(ft1[i] == ft2[i]){ + same_cnt ++; + } + + if(i < 5){ + printf(" i=%d ft1=%.40f diff=%.40f \n", i, ft1[i], ft1[i] - ft2[i]); + printf(" i=%d ft2=%.40f \n", i, ft2[i]); + } + + } + float same_rate = same_cnt*100/count; + printf(" all count=%d same=%d same rate=%.0f%% \n", count, same_cnt, same_rate); + return same_rate; +} + +double check_same_double(double* ft1, double* ft2, int count){ + int same_cnt =0; + for(int i=0; i< count; i++){ + + if(ft1[i] == ft2[i]){ + same_cnt ++; + } + + if(i < 5){ + printf(" i=%d ft1=%.40f diff=%.40f \n", i, ft1[i], ft1[i] - ft2[i]); + printf(" i=%d ft2=%.40f \n", i, ft2[i]); + } + + } + double same_rate = same_cnt*100/count; + printf(" all count=%d same=%d same rate=%.0f%% \n", count, same_cnt, same_rate); + return same_rate; +} + +// +// test compress and decompress +// + +bool DoDouble(double* doubles, int cnt, int algorithm) { + // compress + const char* input = (const char*)doubles; + int input_len = cnt * sizeof(double); + char* output = (char*) malloc(input_len); + int output_len = input_len; + char* buff = (char*) malloc(input_len); + int buff_len = input_len; + + cost_start(); + int ret_len = 0; + if(algorithm == 2) + ret_len = tsCompressDouble(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + else + ret_len = tsCompressDoubleLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + + if(ret_len == -1) { + printf(" compress error.\n"); + return 0; + } + double use_ms1 = cost_end("compress"); + + printf(" compress len=%d input len=%d\n", ret_len, input_len); + double rate=100*(double)ret_len/(double)input_len; + printf(" compress rate=%.1f an-rate=%.4f%%\n", 
+  printf(" compress rate=%.1f an-rate=%.4f%%\n", (double)input_len/(double)ret_len, rate);
+
+  //
+  // decompress
+  //
+  double* ft2 = (double*)malloc(input_len);
+  cost_start();
+  int code = 0;
+
+  if(algorithm == 2)
+    code = tsDecompressDouble(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
+  else
+    code = tsDecompressDoubleLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
+
+
+  double use_ms2 = cost_end("Decompress");
+  printf(" Decompress return length=%d \n", code);
+
+  // compare same
+  double same_rate = check_same_double(doubles, ft2, cnt);
+
+  printf("\n ------------------ count:%d <%s> ---------------- \n", cnt, algorithm == 2?"TD":"SZ");
+  printf(" Compress Rate ......... [%.2f%%] \n", rate);
+  double speed1 = (cnt*sizeof(double)*1000/1024/1024)/use_ms1;
+  printf(" Compress Time ......... [%.4fms] speed=%.1f MB/s\n", use_ms1, speed1);
+  double speed2 = (cnt*sizeof(double)*1000/1024/1024)/use_ms2;
+  printf(" Decompress Time........ [%.4fms] speed=%.1f MB/s\n", use_ms2, speed2);
+  printf(" Same Rate ............. [%.0f%%] \n\n", same_rate);
+
+
+  // free
+  free(ft2);
+  free(buff);
+  free(output);
+
+  return true;
+}
+
+bool DoFloat(float* floats, int cnt, int algorithm, bool lossy) {
+  // compress
+  const char* input = (const char*)floats;
+  int input_len = cnt * sizeof(float);
+  char* output = (char*) malloc(input_len);
+  int output_len = input_len;
+  char* buff = (char*) malloc(input_len);
+  int buff_len = input_len;
+
+  cost_start();
+  int ret_len = 0;
+  ret_len = tsCompressFloat(input, input_len, cnt, output, output_len, algorithm, buff, buff_len);
+
+  if(ret_len == -1) {
+    printf(" compress error.\n");
+    return 0;
+  }
+  double use_ms1 = cost_end("compress");
+
+  printf(" compress len=%d input len=%d\n", ret_len, input_len);
+  float rate=100*(float)ret_len/(float)input_len;
+  printf(" compress rate=%.1f an-rate=%.4f%%\n", (float)input_len/(float)ret_len, rate);
+
+  //
+  // decompress
+  //
+  float* ft2 = (float*)malloc(input_len);
+  cost_start();
+  int code = 0;
+  code = tsDecompressFloat(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len);
+
+  double use_ms2 = cost_end("Decompress");
+  printf(" Decompress return length=%d \n", code);
+
+  // compare same
+  float same_rate = check_same(floats, ft2, cnt);
+
+  printf("\n ------------------ count:%d <%s> ---------------- \n", cnt, lossy?"SZ":"TD");
+  printf(" Compress Rate ......... [%.2f%%] \n", rate);
+  double speed1 = (cnt*sizeof(float)*1000/1024/1024)/use_ms1;
+  printf(" Compress Time ......... [%.4fms] speed=%.1f MB/s\n", use_ms1, speed1);
+  double speed2 = (cnt*sizeof(float)*1000/1024/1024)/use_ms2;
+  printf(" Decompress Time........ [%.4fms] speed=%.1f MB/s\n", use_ms2, speed2);
+  printf(" Same Rate ............. [%.0f%%] \n\n", same_rate);
+
+
+  // free
+  free(ft2);
+  free(buff);
+  free(output);
+
+  return true;
+}
+
+
+bool testFile(const char* inFile, char algorithm, bool lossy){
+  // check valid
+  if(inFile == NULL || inFile[0] == 0 ){
+    printf(" inFile is NULL or EMPTY.\n");
+    return false;
+  }
+
+  int cnt = 0;
+  float* floats = read_float(inFile, &cnt);
+  if(floats == NULL) {
+    return false;
+  }
+
+  DoFloat(floats, cnt, algorithm, lossy);
+
+  free(floats);
+  return true;
+}
+//
+// txt to binary file
+//
+#define BUFF_CNT 256
+bool txt_to_bin(const char* inFile, const char* outFile){
+  // check valid
+  if(inFile == NULL || outFile == NULL || inFile[0] == 0 || outFile[0] == 0){
+    printf(" inFile or outFile is NULL or EMPTY.\n");
+    return false;
+  }
+
+  printf(" infile=%s \n", inFile);
+  printf(" outfile=%s \n", outFile);
+
+  // read in file
+  FILE* pfin = fopen(inFile, "r");
+  if(pfin == NULL){
+    printf(" open IN file %s error. errno=%d\n", inFile, errno);
+    return false;
+  }
+
+  // create out file
+  FILE* pfout = fopen(outFile, "w+");
+  if(pfout == NULL){
+    printf(" open OUT file %s error. errno=%d\n", outFile, errno);
+    fclose(pfin);
+    return false;
+  }
+
+  // read one line from infile and write to outfile
+  char buf[256]={0};
+  float fbuf[BUFF_CNT] = {0};
+  int fi = 0;
+  int count = 0;
+  while(fgets(buf, sizeof(buf), pfin) != NULL) {
+    // get item
+    fbuf[fi] = toFloat(buf);
+    if ( ++fi == BUFF_CNT ) {
+      // write
+      if(fwrite(fbuf, sizeof(float), BUFF_CNT, pfout) == 0) {
+        printf(" write to file %s error , code=%d . app exit.\n", outFile, errno);
+        exit(1);
+      }
+      fi = 0;
+    }
+    count ++;
+    memset(buf, 0, sizeof(buf));
+  }
+
+  // write retain
+  if( fi > 0){
+    if(fwrite(fbuf, sizeof(float), fi, pfout) == 0) {
+      printf(" write to file %s error , code=%d . 
app exit.\n", outFile, errno); + exit(1); + } + } + + // close + fclose(pfin); + fclose(pfout); + + // total + printf(" count=%d write bytes=%d \n", count, (int)(count*sizeof(float))); + return true; +} + + +int memTestDouble() { + // douelbe + const char* input = (const char*) g_de1; + double* de1 = g_de1; + int input_len = sizeof(g_de1); + int cnt = input_len/sizeof(double); + printf(" input input_len=%d count=%d \n", input_len, cnt); + + char* output = (char*) malloc(input_len); + int output_len = input_len; + + char buff[1024] ={0}; + int buff_len = sizeof(buff); + + cost_start(); + int ret_len = tsCompressDoubleLossy(input, input_len, cnt, output, output_len, ONE_STAGE_COMP, buff, buff_len); + if(ret_len == -1) { + printf(" compress error.\n"); + return 0; + } + cost_end(" tscompress"); + + printf(" compress return len=%d input len=%d\n", ret_len, input_len); + printf(" compress rate=%.1f an-rate=%.0f%%\n", (float)input_len/(float)ret_len, 100*(float)ret_len/(float)input_len); + + // + // decompress + // + double* de2 = (double*)malloc(input_len); + cost_start(); + int code = tsDecompressDoubleLossy(output, ret_len, cnt, (char*)de2, input_len, ONE_STAGE_COMP, buff, buff_len); + cost_end("tsde-compress double"); + printf(" de-compress return code=%d \n", code); + + + // + // show + // + int same_cnt = 0; + int diffshow_cnt = 0; + for(int i=0; i< cnt; i++){ + + if(i < 10) + printf(" i=%d de1=%.20f de2=%.20f same=%d\n", i, de1[i], de2[i], de1[i] == de2[i]); + + if(de1[i] == de2[i]) + same_cnt ++; + else { + notsame_cnt++; + if(i >= 10 && ++diffshow_cnt < 50){ + printf(" i=%d de1=%.20f de2=%.20f diff\n", i, de1[i], de2[i]); + } + } + } + + printf(" ---- result doulbe : same =%d cnt=%d rate:%d%% global not same=%d---- \n", same_cnt, cnt, same_cnt*100/cnt, notsame_cnt); + + free(output); + free(de2); + return 1; +} + + + + +int memTest() { + + // + //float ft1[] = {1.2, 2.4, 3.33, 4.444, 5.555, 6.6666, 7.7777, 8.88888,1.2, 2.4, 3.33, 4.444, 5.555, 6.6666, 7.7777, 8.88888,1.2, 2.4, 3.33, 4.444, 5.555, 6.6666, 7.7777, 8.88888}; + // + + float* ft1 = g_ft1; + const char* input = (const char*) ft1; + int input_len = sizeof(g_ft1); + int cnt = input_len/sizeof(float); + printf(" input input_len=%d count=%d \n", input_len, cnt); + + + char* output = (char*) malloc(input_len); + int output_len = input_len; + + char buff[1024] ={0}; + int buff_len = sizeof(buff); + + cost_start(); + int ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, ONE_STAGE_COMP, buff, buff_len); + if(ret_len == -1) { + printf(" compress error.\n"); + return 0; + } + cost_end(" tscompress"); + + printf(" compress return len=%d input len=%d\n", ret_len, input_len); + printf(" compress sz rate=%.1f an-rate=%.2f%%\n", (float)input_len/(float)ret_len, 100*(float)ret_len/(float)input_len); + + // + // decompress + // + float* ft2 = (float*)malloc(input_len); + cost_start(); + int code = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, ONE_STAGE_COMP, buff, buff_len); + cost_end("tsde-compress"); + printf(" de-compress return code=%d \n", code); + + + // + // show + // + int same_cnt = 0; + int diffshow_cnt = 0; + for(int i=0; i< cnt; i++){ + + if(i < 10) + printf(" i=%d ft1=%.20f ft2=%.20f same=%d\n", i, ft1[i], ft2[i], ft1[i] == ft2[i]); + + if(ft1[i] == ft2[i]) + same_cnt ++; + else { + notsame_cnt++; + if(i >= 10 && ++diffshow_cnt < 50){ + printf(" i=%d ft1=%.20f ft2=%.20f diff\n", i, ft1[i], ft2[i]); + } + } + } + + printf(" ---- result : same =%d cnt=%d rate:%d%% global not 
same=%d---- \n", same_cnt, cnt, same_cnt*100/cnt, notsame_cnt); + + free(output); + free(ft2); + return 1; +} + +void* memTestThread(void* lparam) { + //memTest(); + printf(" enter thread ....\n"); + for(int i=0; i<1; i++) + { + memTest(); + printf(" start i=%d .... \n", i); + } + return NULL; +} + +void test_threadsafe(int thread_count){ + printf(" test thread safe . thread count=%d \n", thread_count); + pthread_t handle[1000000]; + int i=0; + for(i=0; i< thread_count; i++){ + printf(" create thread %d... \n", i); + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_create(&handle[i], &attr, memTestThread, NULL); + pthread_attr_destroy(&attr); + } + + for(i=0; i< thread_count; i++) + { + pthread_join(handle[i], NULL); + } + + printf(" test thread safe end. not same count=%d\n", notsame_cnt); + +} + + +void* memTestThreadDouble(void* lparam) { + //memTest(); + printf(" enter thread ....\n"); + for(int i=0; i< 1; i++) + { + memTest(); + printf(" double start i=%d .... \n", i); + } + return NULL; +} + +void test_threadsafe_double(int thread_count){ + printf(" test thread safe . thread count=%d \n", thread_count); + pthread_t handle[1000000]; + int i=0; + for(i=0; i< thread_count; i++){ + printf(" create thread %d... \n", i); + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_create(&handle[i], &attr, memTestThreadDouble, NULL); + pthread_attr_destroy(&attr); + } + + for(i=0; i< thread_count; i++) + { + pthread_join(handle[i], NULL); + } + + printf("\n ---- double test thread safe end. not same count=%d-----\n", notsame_cnt); + +} + + +void unitTestFloat() { + + float ft1 [] = {1.11, 2.22, 3.333}; + int cnt = sizeof(ft1)/sizeof(float); + float* floats = ft1; + int algorithm = 2; + + // compress + const char* input = (const char*)floats; + int input_len = cnt * sizeof(float); + int output_len = input_len + 1024; + char* output = (char*) malloc(output_len); + char* buff = (char*) malloc(input_len); + int buff_len = input_len; + + printf(" ft1 have count=%d \n", cnt); + strcpy(output, "abcde"); + + cost_start(); + int ret_len = 0; + ret_len = tsCompressFloatLossy(input, input_len, cnt, output, output_len, algorithm, buff, buff_len); + + if(ret_len == 0) { + printf(" compress error.\n"); + return ; + } + double use_ms1 = cost_end("compress"); + + printf(" compress len=%d input len=%d\n", ret_len, input_len); + float rate=100*(float)ret_len/(float)input_len; + printf(" compress rate=%.1f an-rate=%.4f%%\n", (float)input_len/(float)ret_len, rate); + + // + // decompress + // + float* ft2 = (float*)malloc(input_len); + cost_start(); + int code = 0; + code = tsDecompressFloatLossy(output, ret_len, cnt, (char*)ft2, input_len, algorithm, buff, buff_len); + + + double use_ms2 = cost_end("Decompress"); + printf(" Decompress return length=%d \n", code); + + // compare same + float same_rate = check_same(floats, ft2, cnt); + + printf("\n ------------------ count:%d TD ---------------- \n", cnt); + printf(" Compress Rate ......... [%.0f%%] \n", rate); + double speed1 = (cnt*sizeof(float)*1000/1024/1024)/use_ms1; + printf(" Compress Time ......... [%.4fms] speed=%.1f MB/s\n", use_ms1, speed1); + double speed2 = (cnt*sizeof(float)*1000/1024/1024)/use_ms2; + printf(" Decompress Time........ [%.4fms] speed=%.1f MB/s\n", use_ms2, speed2); + printf(" Same Rate ............. 
[%.0f%%] \n\n", same_rate); + + + // free + free(ft2); + free(buff); + free(output); + +} + +#define DB_CNT 500 +void test_same_double(int algo){ + double ori = 3.1415926; + + double doubles [DB_CNT]; + for(int i=0; i< DB_CNT; i++){ + doubles[i] = ori; + } + + DoDouble(doubles, DB_CNT, algo); + +} + +#ifdef TD_TSZ +extern char lossyColumns []; +extern bool lossyDouble; +extern bool lossyFloat; +extern double fPrecision; +extern char Compressor []; +#endif +// +// ----------------- main ---------------------- +// +int main(int argc, char *argv[]) { + printf("welcome to use taospack tools v1.3\n"); + + //printf(" sizeof(int)=%d\n", (int)sizeof(int)); + //printf(" sizeof(long)=%d\n", (int)sizeof(long)); + //printf(" sizeof(short)=%d\n",(int)sizeof(short)); + + + strcpy(lossyColumns, "float|double"); + bool lossy = true; + //fPrecision = 1E-5; + //strcpy(Compressor, "GZIP_COMPRESSOR"); + + tsCompressInit(); + lossyFloat = lossyDouble = true; + + printf(" fPrecision=%.15f\n",fPrecision); + + + // + //tsCompressExit(); + //return 1; + //printf(" SZ_SIZE_TYPE=%d", ) + + if(argc == 3){ + char algo = 0; + // t + if(strcmp(argv[1], "-tone") == 0 || strcmp(argv[1], "-t") == 0 ) { + algo = ONE_STAGE_COMP; + lossyFloat = lossyDouble = true; + } + if(strcmp(argv[1], "-tw") == 0) { + algo = TWO_STAGE_COMP; + lossy = false; + lossyFloat = lossyDouble = false; + } + + if(strcmp(argv[1], "-sf") == 0) { + test_threadsafe(atoi(argv[2])); + return 0; + } + + if(strcmp(argv[1], "-sd") == 0) { + test_threadsafe_double(atoi(argv[2])); + return 0; + } + + if(strcmp(argv[1], "-samed") == 0) { + test_same_double(atoi(argv[2])); + return 0; + } + + if(algo == 0){ + printf(" no param -tone -tw \n"); + return 0; + } + + bool ret = testFile(argv[2], algo, lossy); + printf(" test file %s. \n", ret ? 
"ok" : "err"); + return 1; + + } else if( argc == 2) { + if(strcmp(argv[1], "-mem") == 0) { + memTest(); + } + } + else{ + unitTestFloat(); + } + + tsCompressExit(); + return 0; +} + +#endif + diff --git a/src/mnode/CMakeLists.txt b/src/mnode/CMakeLists.txt index 2df4708c239515febafc7a4f3ab3f63bd9e434e8..a7fc54d87786f430f913980f089d29d969b01fce 100644 --- a/src/mnode/CMakeLists.txt +++ b/src/mnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h index e8cf7e30ac03bcfcafe5eaf9785bf0df60bf80c4..e974251d2ef024cb5977378d31c26c31fa718dcd 100644 --- a/src/mnode/inc/mnodeDef.h +++ b/src/mnode/inc/mnodeDef.h @@ -214,6 +214,23 @@ typedef struct SUserObj { struct SAcctObj * pAcct; } SUserObj; +typedef struct SFuncObj { + char name[TSDB_FUNC_NAME_LEN]; + char path[128]; + int32_t contLen; + char cont[TSDB_FUNC_CODE_LEN]; + int32_t funcType; + int32_t bufSize; + int64_t createdTime; + uint8_t resType; + int16_t resBytes; + int64_t sig; // partial md5 sign + int16_t type; // [lua script|so|js] + int8_t reserved[64]; + int8_t updateEnd[4]; + int32_t refCount; +} SFuncObj; + typedef struct { int64_t totalStorage; // Total storage wrtten from this account int64_t compStorage; // Compressed storage on disk @@ -258,7 +275,7 @@ typedef struct { void * pIter; void ** ppShow; int16_t offset[TSDB_MAX_COLUMNS]; - int16_t bytes[TSDB_MAX_COLUMNS]; + int32_t bytes[TSDB_MAX_COLUMNS]; int32_t numOfReads; int8_t maxReplica; int8_t reserved0[1]; diff --git a/src/mnode/inc/mnodeFunc.h b/src/mnode/inc/mnodeFunc.h new file mode 100644 index 0000000000000000000000000000000000000000..e33f3fb5671faeea39154a8ed98cb8923d54a501 --- /dev/null +++ b/src/mnode/inc/mnodeFunc.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_MNODE_FUNC_H +#define TDENGINE_MNODE_FUNC_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "mnodeDef.h" + +int32_t mnodeInitFuncs(); +void mnodeCleanupFuncs(); + +SFuncObj *mnodeGetFunc(char *name); +void * mnodeGetNextFunc(void *pIter, SFuncObj **pFunc); +void mnodeCancelGetNextFunc(void *pIter); + +void mnodeIncFuncRef(SFuncObj *pFunc); +void mnodeDecFuncRef(SFuncObj *pFunc); + +int32_t mnodeCreateFunc(SAcctObj *pAcct, char *name, int32_t codeLen, char *code, char *path, uint8_t outputType, int16_t outputLen, int32_t funcType, int32_t bufSize, SMnodeMsg *pMsg); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/mnode/inc/mnodeSdb.h b/src/mnode/inc/mnodeSdb.h index 2ae0c47902677944e000bfdb564bc1d65f9daf99..25b841ef34d35c0691ead226f6be46e12e298125 100644 --- a/src/mnode/inc/mnodeSdb.h +++ b/src/mnode/inc/mnodeSdb.h @@ -33,7 +33,8 @@ typedef enum { SDB_TABLE_VGROUP = 6, SDB_TABLE_STABLE = 7, SDB_TABLE_CTABLE = 8, - SDB_TABLE_MAX = 9 + SDB_TABLE_FUNC = 9, + SDB_TABLE_MAX = 10 } ESdbTable; typedef enum { @@ -112,4 +113,4 @@ int32_t mnodeCompactWal(); } #endif -#endif \ No newline at end of file +#endif diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index f4fc88476f8c2ddeb32acc4f37d27f17c76af788..9602e16483f3a367117f998cc619dbe408859d93 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -554,6 +554,9 @@ void mnodeCleanupDbs() { tsDbSdb = NULL; } + + + static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { int32_t cols = 0; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index a087b076a59e8b7f849d6d97b7f8e4e6283f7756..02cf1c782c54f61e9bf113d7319e1009e4e7d946 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -16,7 +16,6 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tgrant.h" -#include "tbn.h" #include "tglobal.h" #include "tconfig.h" #include "tutil.h" @@ -101,6 +100,8 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) { pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED; } + pDnode->customScore = 0; + dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort); mnodeUpdateDnodeEps(); @@ -630,7 +631,8 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { } int32_t numOfMnodes = mnodeGetMnodesNum(); - if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum() && !pDnode->isMgmt) { + if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum() + && bnDnodeCanCreateMnode(pDnode)) { bnNotify(); } @@ -1144,6 +1146,7 @@ static int32_t mnodeRetrieveConfigs(SShowObj *pShow, char *data, int32_t rows, v numOfRows++; break; case TAOS_CFG_VTYPE_FLOAT: + case TAOS_CFG_VTYPE_DOUBLE: t = snprintf(varDataVal(pWrite), TSDB_CFG_VALUE_LEN, "%f", *((float *)cfg->ptr)); varDataSetLen(pWrite, t); numOfRows++; @@ -1296,4 +1299,4 @@ int32_t mnodeCompactDnodes() { mInfo("end to compact dnodes table..."); return 0; -} \ No newline at end of file +} diff --git a/src/mnode/src/mnodeFunc.c b/src/mnode/src/mnodeFunc.c new file mode 100644 index 0000000000000000000000000000000000000000..253958efbe9a0d81d0542217447eaf750e10caf9 --- /dev/null +++ b/src/mnode/src/mnodeFunc.c @@ -0,0 +1,485 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "trpc.h" +#include "tutil.h" +#include "tglobal.h" +#include "tgrant.h" +#include "tdataformat.h" +#include "tkey.h" +#include "mnode.h" +#include "dnode.h" +#include "mnodeDef.h" +#include "mnodeInt.h" +#include "mnodeAcct.h" +#include "mnodeUser.h" +#include "mnodeMnode.h" +#include "mnodeSdb.h" +#include "mnodeShow.h" +#include "mnodeFunc.h" +#include "mnodeWrite.h" +#include "mnodeRead.h" +#include "mnodePeer.h" + +int64_t tsFuncRid = -1; +static void * tsFuncSdb = NULL; +static int32_t tsFuncUpdateSize = 0; +static int32_t mnodeGetFuncMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn); +static int32_t mnodeRetrieveFuncs(SShowObj *pShow, char *data, int32_t rows, void *pConn); +static int32_t mnodeProcessRetrieveFuncImplMsg(SMnodeMsg *pMsg); +static int32_t mnodeProcessCreateFuncMsg(SMnodeMsg *pMsg); +static int32_t mnodeProcessDropFuncMsg(SMnodeMsg *pMsg); + +static int32_t mnodeFuncActionDestroy(SSdbRow *pRow) { + tfree(pRow->pObj); + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeFuncActionInsert(SSdbRow *pRow) { + SFuncObj *pFunc = pRow->pObj; + + mTrace("func:%s, contLen: %d, insert into sdb", pFunc->name, pFunc->contLen); + + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeFuncActionDelete(SSdbRow *pRow) { + SFuncObj *pFunc = pRow->pObj; + + mTrace("func:%s, length: %d, delete from sdb", pFunc->name, pFunc->contLen); + + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeFuncActionUpdate(SSdbRow *pRow) { + SFuncObj *pFunc = pRow->pObj; + + SFuncObj *pSaved = mnodeGetFunc(pFunc->name); + if (pFunc != pSaved) { + memcpy(pSaved, pFunc, tsFuncUpdateSize); + free(pFunc); + } + + mnodeDecFuncRef(pSaved); + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeFuncActionEncode(SSdbRow *pRow) { + SFuncObj *pFunc = pRow->pObj; + + memcpy(pRow->rowData, pFunc, tsFuncUpdateSize); + pRow->rowSize = tsFuncUpdateSize; + + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeFuncActionDecode(SSdbRow *pRow) { + SFuncObj *pFunc = (SFuncObj *)calloc(1, sizeof(SFuncObj)); + if (pFunc == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY; + + memcpy(pFunc, pRow->rowData, tsFuncUpdateSize); + pRow->pObj = pFunc; + + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeFuncActionRestored() { + int64_t numOfRows = sdbGetNumOfRows(tsFuncSdb); + + if (numOfRows <= 0 && dnodeIsFirstDeploy()) { + mInfo("dnode first deploy, func restored."); + } + + return TSDB_CODE_SUCCESS; +} + +int32_t mnodeInitFuncs() { + SFuncObj tObj; + tsFuncUpdateSize = (int32_t)((int8_t *)tObj.updateEnd - (int8_t *)&tObj); + + SSdbTableDesc desc = { + .id = SDB_TABLE_FUNC, + .name = "funcs", + .hashSessions = TSDB_DEFAULT_USERS_HASH_SIZE, + .maxRowSize = tsFuncUpdateSize, + .refCountPos = (int32_t)((int8_t *)(&tObj.refCount) - (int8_t *)&tObj), + .keyType = SDB_KEY_STRING, + .fpInsert = mnodeFuncActionInsert, + .fpDelete = mnodeFuncActionDelete, + .fpUpdate = mnodeFuncActionUpdate, + .fpEncode = mnodeFuncActionEncode, + .fpDecode = mnodeFuncActionDecode, + .fpDestroy = mnodeFuncActionDestroy, + .fpRestored = mnodeFuncActionRestored + }; + + tsFuncRid = sdbOpenTable(&desc); + tsFuncSdb = sdbGetTableByRid(tsFuncRid); + if (tsFuncSdb == NULL) { + 
mError("table:%s, failed to create hash", desc.name); + return -1; + } + + mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_FUNCTION, mnodeProcessCreateFuncMsg); + mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_FUNCTION, mnodeProcessDropFuncMsg); + mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_RETRIEVE_FUNC, mnodeProcessRetrieveFuncImplMsg); + + mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_FUNCTION, mnodeGetFuncMeta); + mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_FUNCTION, mnodeRetrieveFuncs); + mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_FUNCTION, mnodeCancelGetNextFunc); + + mDebug("table:%s, hash is created", desc.name); + + return 0; +} + +void mnodeCleanupFuncs() { + sdbCloseTable(tsFuncRid); + tsFuncSdb = NULL; +} + +SFuncObj *mnodeGetFunc(char *name) { + return (SFuncObj *)sdbGetRow(tsFuncSdb, name); +} + +void *mnodeGetNextFunc(void *pIter, SFuncObj **pFunc) { + return sdbFetchRow(tsFuncSdb, pIter, (void **)pFunc); +} + +void mnodeCancelGetNextFunc(void *pIter) { + sdbFreeIter(tsFuncSdb, pIter); +} + +void mnodeIncFuncRef(SFuncObj *pFunc) { + sdbIncRef(tsFuncSdb, pFunc); +} + +void mnodeDecFuncRef(SFuncObj *pFunc) { + sdbDecRef(tsFuncSdb, pFunc); +} +/* +static int32_t mnodeUpdateFunc(SFuncObj *pFunc, void *pMsg) { + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsFuncSdb, + .pObj = pFunc, + .pMsg = pMsg + }; + + int32_t code = sdbUpdateRow(&row); + if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + mError("func:%s, failed to alter by %s, reason:%s", pFunc->name, mnodeGetUserFromMsg(pMsg), tstrerror(code)); + } else { + mLInfo("func:%s, is altered by %s", pFunc->name, mnodeGetUserFromMsg(pMsg)); + } + + return code; +} +*/ +int32_t mnodeCreateFunc(SAcctObj *pAcct, char *name, int32_t codeLen, char *codeScript, char *path, uint8_t outputType, int16_t outputLen, int32_t funcType, int32_t bufSize, SMnodeMsg *pMsg) { + if (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_GRANT_EXPIRED; + } + + if (!pMsg->pUser->writeAuth) { + return TSDB_CODE_MND_NO_RIGHTS; + } + + int32_t code = acctCheck(pAcct, ACCT_GRANT_USER); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = grantCheck(TSDB_GRANT_USER); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (name[0] == 0) { + return TSDB_CODE_MND_INVALID_FUNC_NAME; + } + + if (codeScript[0] == 0) { + return TSDB_CODE_MND_INVALID_FUNC_CODE; + } + + if (codeLen < 0 || codeLen > TSDB_FUNC_CODE_LEN) { + return TSDB_CODE_MND_INVALID_FUNC_CODE; + } + + if (bufSize < 0 || bufSize > TSDB_FUNC_BUF_SIZE) { + return TSDB_CODE_MND_INVALID_FUNC_BUFSIZE; + } + + SFuncObj *pFunc = mnodeGetFunc(name); + if (pFunc != NULL) { + mDebug("func:%s, is already there", name); + mnodeDecFuncRef(pFunc); + return TSDB_CODE_MND_FUNC_ALREADY_EXIST; + } + + pFunc = calloc(1, sizeof(SFuncObj)); + tstrncpy(pFunc->name, name, TSDB_FUNC_NAME_LEN); + tstrncpy(pFunc->path, path, tListLen(pFunc->path)); + memcpy(pFunc->cont, codeScript, codeLen); + pFunc->contLen = codeLen; + pFunc->createdTime = taosGetTimestampMs(); + pFunc->resType = outputType; + pFunc->resBytes = outputLen; + pFunc->funcType = funcType; + pFunc->bufSize = bufSize; + pFunc->sig = 0; + pFunc->type = 1; //lua script, refactor + + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsFuncSdb, + .pObj = pFunc, + .rowSize = sizeof(SFuncObj), + .pMsg = pMsg + }; + + code = sdbInsertRow(&row); + if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + mError("func:%s, failed to create by %s, reason:%s", pFunc->name, 
mnodeGetUserFromMsg(pMsg), tstrerror(code)); + tfree(pFunc); + } else { + mLInfo("func:%s, is created by %s", pFunc->name, mnodeGetUserFromMsg(pMsg)); + } + + return code; +} + +static int32_t mnodeDropFunc(SFuncObj *pFunc, void *pMsg) { + SSdbRow row = { + .type = SDB_OPER_GLOBAL, + .pTable = tsFuncSdb, + .pObj = pFunc, + .pMsg = pMsg + }; + + int32_t code = sdbDeleteRow(&row); + if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + mError("func:%s, failed to drop by %s, reason:%s", pFunc->name, mnodeGetUserFromMsg(pMsg), tstrerror(code)); + } else { + mLInfo("func:%s, is dropped by %s", pFunc->name, mnodeGetUserFromMsg(pMsg)); + } + + return code; +} + +static int32_t mnodeGetFuncsNum() { + return (int32_t)sdbGetNumOfRows(tsFuncSdb); +} + +static int32_t mnodeGetFuncMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) { + SUserObj *pUser = mnodeGetUserFromConn(pConn); + if (pUser == NULL) { + return TSDB_CODE_MND_NO_USER_FROM_CONN; + } + + int32_t cols = 0; + SSchema *pSchema = pMeta->schema; + + pShow->bytes[cols] = TSDB_FUNC_NAME_LEN + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "name"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = PATH_MAX + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "path"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "aggregate"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_TYPE_STR_MAX_LEN + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "outputtype"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 8; + pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP; + strcpy(pSchema[cols].name, "create_time"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "code_len"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "bufsize"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pMeta->numOfColumns = htons(cols); + strcpy(pMeta->tableFname, "show funcs"); + pShow->numOfColumns = cols; + + pShow->offset[0] = 0; + for (int32_t i = 1; i < cols; ++i) { + pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1]; + } + + pShow->numOfRows = mnodeGetFuncsNum(); + pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1]; + + mnodeDecUserRef(pUser); + + return 0; +} + +static void* mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int16_t len) { + char *msg = "unknown"; + if (type >= sizeof(tDataTypes)/sizeof(tDataTypes[0])) { + return msg; + } + + if (type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BINARY) { + int32_t bytes = len > 0 ? (int)(len - VARSTR_HEADER_SIZE) : len; + + snprintf(buf, buflen - 1, "%s(%d)", tDataTypes[type].name, type == TSDB_DATA_TYPE_NCHAR ? 
bytes/4 : bytes); + buf[buflen - 1] = 0; + + return buf; + } + + return tDataTypes[type].name; +} + +static int32_t mnodeRetrieveFuncs(SShowObj *pShow, char *data, int32_t rows, void *pConn) { + int32_t numOfRows = 0; + SFuncObj *pFunc = NULL; + int32_t cols = 0; + char *pWrite; + char buf[TSDB_TYPE_STR_MAX_LEN]; + + while (numOfRows < rows) { + pShow->pIter = mnodeGetNextFunc(pShow->pIter, &pFunc); + if (pFunc == NULL) break; + + cols = 0; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pFunc->name, pShow->bytes[cols]); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pFunc->path, pShow->bytes[cols]); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pFunc->funcType == TSDB_UDF_TYPE_AGGREGATE ? 1 : 0; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, mnodeGenTypeStr(buf, TSDB_TYPE_STR_MAX_LEN, pFunc->resType, pFunc->resBytes), pShow->bytes[cols]); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = pFunc->createdTime; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pFunc->contLen; + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = pFunc->bufSize; + cols++; + + numOfRows++; + mnodeDecFuncRef(pFunc); + } + + mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow); + pShow->numOfReads += numOfRows; + return numOfRows; +} + +static int32_t mnodeProcessCreateFuncMsg(SMnodeMsg *pMsg) { + SCreateFuncMsg *pCreate = pMsg->rpcMsg.pCont; + pCreate->codeLen = htonl(pCreate->codeLen); + pCreate->outputLen = htons(pCreate->outputLen); + pCreate->funcType = htonl(pCreate->funcType); + pCreate->bufSize = htonl(pCreate->bufSize); + + return mnodeCreateFunc(pMsg->pUser->pAcct, pCreate->name, pCreate->codeLen, pCreate->code, pCreate->path, pCreate->outputType, pCreate->outputLen, pCreate->funcType, pCreate->bufSize, pMsg); +} + +static int32_t mnodeProcessDropFuncMsg(SMnodeMsg *pMsg) { + SDropFuncMsg *pDrop = pMsg->rpcMsg.pCont; + + SFuncObj *pFunc = mnodeGetFunc(pDrop->name); + if (pFunc == NULL) { + return TSDB_CODE_MND_INVALID_FUNC; + } + + return mnodeDropFunc(pFunc, pMsg); +} + +static int32_t mnodeProcessRetrieveFuncImplMsg(SMnodeMsg *pMsg) { + SRetrieveFuncMsg *pInfo = pMsg->rpcMsg.pCont; + pInfo->num = htonl(pInfo->num); + + int32_t t = sizeof(SUdfFuncMsg) + (sizeof(SFunctionInfoMsg) + TSDB_FUNC_CODE_LEN) * pInfo->num + 16384; + + SUdfFuncMsg *pFuncMsg = rpcMallocCont(t); + pFuncMsg->num = htonl(pInfo->num); + char* pOutput = pFuncMsg->content; + tstr* name = (tstr*) pInfo->name; + + for(int32_t i = 0; i < pInfo->num; ++i) { + char buf[TSDB_FUNC_NAME_LEN] = {0}; + tstrncpy(buf, name->data, htons(name->len) + 1); + + SFuncObj* pFuncObj = mnodeGetFunc(buf); + if (pFuncObj == NULL) { + mError("function %s does not exist", buf); + return TSDB_CODE_MND_INVALID_FUNC; + } + + SFunctionInfoMsg* pFuncInfo = (SFunctionInfoMsg*) pOutput; + + strcpy(pFuncInfo->name, buf); + pFuncInfo->len = htonl(pFuncObj->contLen); + memcpy(pFuncInfo->content, pFuncObj->cont, pFuncObj->contLen); + + pFuncInfo->funcType = htonl(pFuncObj->funcType); + pFuncInfo->resType = pFuncObj->resType; + pFuncInfo->resBytes = htons(pFuncObj->resBytes); + 
pFuncInfo->bufSize = htonl(pFuncObj->bufSize); + + pOutput += sizeof(SFunctionInfoMsg) + pFuncObj->contLen; + name =(tstr *)((char *)name + sizeof(*name) + htons(name->len)); + } + + pMsg->rpcRsp.rsp = pFuncMsg; + pMsg->rpcRsp.len = (int32_t)(pOutput - (char*)pFuncMsg); + return TSDB_CODE_SUCCESS; +} diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index 8ce798c8ec2271ecb236d49278f07f974e4bb043..df3c49165ca00262016006e4450eb1ba574a5525 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -32,6 +32,7 @@ #include "mnodeSdb.h" #include "mnodeVgroup.h" #include "mnodeUser.h" +#include "mnodeFunc.h" #include "mnodeTable.h" #include "mnodeCluster.h" #include "mnodeShow.h" @@ -46,6 +47,7 @@ static SStep tsMnodeSteps[] = { {"cluster", mnodeInitCluster, mnodeCleanupCluster}, {"accts", mnodeInitAccts, mnodeCleanupAccts}, {"users", mnodeInitUsers, mnodeCleanupUsers}, + {"funcs", mnodeInitFuncs, mnodeCleanupFuncs}, {"dnodes", mnodeInitDnodes, mnodeCleanupDnodes}, {"dbs", mnodeInitDbs, mnodeCleanupDbs}, {"vgroups", mnodeInitVgroups, mnodeCleanupVgroups}, diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index cbf713af6514ce305d91e7edc6710ba0d51eeeec..2c117310b38632305463438e34d79dc1c439388d 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -32,6 +32,7 @@ #define CONN_KEEP_TIME (tsShellActivityTimer * 3) #define CONN_CHECK_TIME (tsShellActivityTimer * 2) #define QUERY_ID_SIZE 20 +#define QUERY_OBJ_ID_SIZE 10 #define QUERY_STREAM_SAVE_SIZE 20 static SCacheObj *tsMnodeConnCache = NULL; @@ -361,6 +362,30 @@ static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pC pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; + pShow->bytes[cols] = QUERY_OBJ_ID_SIZE + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "sql_obj_id"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "pid"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = TSDB_EP_LEN + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "ep"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + + pShow->bytes[cols] = 4; + pSchema[cols].type = TSDB_DATA_TYPE_INT; + strcpy(pSchema[cols].name, "sub_queries"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + pShow->bytes[cols] = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; strcpy(pSchema[cols].name, "sql"); @@ -434,6 +459,29 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; *(int64_t *)pWrite = htobe64(pDesc->useconds); cols++; + /* + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int64_t *)pWrite = htobe64(pDesc->sqlObjId); + cols++; + */ + snprintf(str, tListLen(str), "0x%08" PRIx64, htobe64(pDesc->sqlObjId)); + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, str, pShow->bytes[cols]); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = htonl(pDesc->pid); + cols++; + + char epBuf[TSDB_EP_LEN + 1] = {0}; + snprintf(epBuf, tListLen(epBuf), "%s:%u", pDesc->fqdn, pConnObj->port); + pWrite = data + pShow->offset[cols] * rows + 
pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, epBuf, pShow->bytes[cols]); + cols++; + + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + *(int32_t *)pWrite = htonl(pDesc->numOfSub); + cols++; pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->sql, pShow->bytes[cols]); diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 897c3a2f0f6c144a7975c73fd2ad57342dc59f7f..7644f4d7339251037fbe244b17e5af1fa9af0484 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -1113,6 +1113,7 @@ static void *sdbWorkerFp(void *pWorker) { void * unUsed; taosBlockSIGPIPE(); + setThreadName("sdbWorker"); while (1) { int32_t numOfMsgs = taosReadAllQitemsFromQset(tsSdbWQset, tsSdbWQall, &unUsed); diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 032c6ee94b5abe422c017d56d0bd3a44d4a1c8cf..6e5cf14b965d166e381dd1d535668580e4041d9d 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -42,6 +42,7 @@ #include "mnodeWrite.h" #include "mnodeRead.h" #include "mnodePeer.h" +#include "mnodeFunc.h" #define ALTER_CTABLE_RETRY_TIMES 3 #define CREATE_CTABLE_RETRY_TIMES 10 @@ -104,6 +105,20 @@ static void mnodeDestroyChildTable(SCTableObj *pTable) { tfree(pTable); } +static char* mnodeGetTableShowPattern(SShowObj *pShow) { + char* pattern = NULL; + if (pShow != NULL && pShow->payloadLen > 0) { + pattern = (char*)malloc(pShow->payloadLen + 1); + if (pattern == NULL) { + terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; + return NULL; + } + memcpy(pattern, pShow->payload, pShow->payloadLen); + pattern[pShow->payloadLen] = 0; + } + return pattern; +} + static int32_t mnodeChildTableActionDestroy(SSdbRow *pRow) { mnodeDestroyChildTable(pRow->pObj); return TSDB_CODE_SUCCESS; @@ -1035,6 +1050,20 @@ static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) { return code; } +static uint64_t mnodeCreateSuperTableUid() { + int64_t us = taosGetTimestampUs(); + uint64_t x = (us & ((((uint64_t)1)<<40) - 1)); + x = x << 24; + + return x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); +} + +static uint64_t mnodeCreateTableUid(int32_t vgId, int32_t tid) { + uint64_t uid = (((uint64_t)vgId) << 48) + ((((uint64_t)tid) & ((1ul << 24) - 1ul)) << 24) + + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + return uid; +} + static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; @@ -1058,17 +1087,16 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_TOO_MANY_COLUMNS; } - SSTableObj * pStable = calloc(1, sizeof(SSTableObj)); + SSTableObj *pStable = calloc(1, sizeof(SSTableObj)); if (pStable == NULL) { mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); return TSDB_CODE_MND_OUT_OF_MEMORY; } - int64_t us = taosGetTimestampUs(); pStable->info.tableId = strdup(pCreate->tableName); pStable->info.type = TSDB_SUPER_TABLE; pStable->createdTime = taosGetTimestampMs(); - pStable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + pStable->uid = mnodeCreateSuperTableUid(); pStable->sversion = 0; pStable->tversion = 0; pStable->numOfColumns = numOfColumns; @@ -1077,7 +1105,8 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { int32_t schemaSize = numOfCols * 
sizeof(SSchema); pStable->schema = (SSchema *)calloc(1, schemaSize); if (pStable->schema == NULL) { - free(pStable); + tfree(pStable->info.tableId); + tfree(pStable); mError("msg:%p, app:%p table:%s, failed to create, no schema input", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); return TSDB_CODE_MND_INVALID_TABLE_NAME; } @@ -1094,6 +1123,9 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { if (!tIsValidSchema(pStable->schema, pStable->numOfColumns, pStable->numOfTags)) { mError("msg:%p, app:%p table:%s, failed to create table, invalid schema", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableName); + tfree(pStable->info.tableId); + tfree(pStable->schema); + tfree(pStable); return TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG; } @@ -1471,6 +1503,18 @@ static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg) { return TSDB_CODE_MND_FIELD_NOT_EXIST; } + // check exceed max row bytes + int32_t i; + uint32_t nLen = 0; + for (i = 0; i < pStable->numOfColumns; ++i) { + nLen += (pStable->schema[i].colId == col) ? pAlter->schema[0].bytes : pStable->schema[i].bytes; + } + if (nLen > TSDB_MAX_BYTES_PER_ROW) { + mError("msg:%p, app:%p stable:%s, change column, name:%s exceed max row bytes", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, name); + return TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES; + } + // update SSchema *schema = (SSchema *) (pStable->schema + col); ASSERT(schema->type == TSDB_DATA_TYPE_BINARY || schema->type == TSDB_DATA_TYPE_NCHAR); @@ -1603,6 +1647,11 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; char stableName[TSDB_TABLE_NAME_LEN] = {0}; + char* pattern = mnodeGetTableShowPattern(pShow); + if (pShow->payloadLen > 0 && pattern == NULL) { + return 0; + } + while (numOfRows < rows) { pShow->pIter = mnodeGetNextSuperTable(pShow->pIter, &pTable); if (pTable == NULL) break; @@ -1614,7 +1663,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, memset(stableName, 0, tListLen(stableName)); mnodeExtractTableName(pTable->info.tableId, stableName); - if (pShow->payloadLen > 0 && patternMatch(pShow->payload, stableName, sizeof(stableName) - 1, &info) != TSDB_PATTERN_MATCH) { + if (pShow->payloadLen > 0 && patternMatch(pattern, stableName, sizeof(stableName) - 1, &info) != TSDB_PATTERN_MATCH) { mnodeDecTableRef(pTable); continue; } @@ -1654,6 +1703,7 @@ int32_t mnodeRetrieveShowSuperTables(SShowObj *pShow, char *data, int32_t rows, mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow); mnodeDecDbRef(pDb); + free(pattern); return numOfRows; } @@ -1740,16 +1790,22 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) { return TSDB_CODE_SUCCESS; } -static int32_t calculateVgroupMsgLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) { +static int32_t doGetVgroupInfoLength(char* name) { + SSTableObj *pTable = mnodeGetSuperTable(name); + int32_t len = 0; + if (pTable != NULL && pTable->vgHash != NULL) { + len = (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg)); + } + + mnodeDecTableRef(pTable); + return len; +} + +static int32_t getVgroupInfoLength(SSTableVgroupMsg* pInfo, int32_t numOfTable) { int32_t contLen = sizeof(SSTableVgroupRspMsg) + 32 * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg); for (int32_t i = 0; i < numOfTable; ++i) { char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; - SSTableObj *pTable = mnodeGetSuperTable(stableName); - if (pTable != NULL && pTable->vgHash != NULL) { 
- contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SVgroupMsg) + sizeof(SVgroupsMsg)); - } - - mnodeDecTableRef(pTable); + contLen += doGetVgroupInfoLength(stableName); } return contLen; @@ -1820,7 +1876,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { int32_t numOfTable = htonl(pInfo->numOfTables); // calculate the required space. - int32_t contLen = calculateVgroupMsgLength(pInfo, numOfTable); + int32_t contLen = getVgroupInfoLength(pInfo, numOfTable); SSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen); if (pRsp == NULL) { return TSDB_CODE_MND_OUT_OF_MEMORY; @@ -2061,16 +2117,13 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { } pTable->suid = pMsg->pSTable->uid; - pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) + - ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + pTable->uid = mnodeCreateTableUid(pTable->vgId, pTable->tid); pTable->superTable = pMsg->pSTable; } else { if (pTable->info.type == TSDB_SUPER_TABLE) { - int64_t us = taosGetTimestampUs(); - pTable->uid = (us << 24) + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + pTable->uid = mnodeCreateSuperTableUid(); } else { - pTable->uid = (((uint64_t)pTable->vgId) << 48) + ((((uint64_t)pTable->tid) & ((1ul << 24) - 1ul)) << 24) + - ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); + pTable->uid = mnodeCreateTableUid(pTable->vgId, pTable->tid); } pTable->sversion = 0; @@ -2860,11 +2913,33 @@ static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) { } } +static SMultiTableMeta* ensureMsgBufferSpace(SMultiTableMeta *pMultiMeta, SArray* pList, int32_t* totalMallocLen, int32_t numOfVgroupList) { + int32_t len = 0; + for (int32_t i = 0; i < numOfVgroupList; ++i) { + char *name = taosArrayGetP(pList, i); + len += doGetVgroupInfoLength(name); + } + + if (len + pMultiMeta->contLen > (*totalMallocLen)) { + while (len + pMultiMeta->contLen > (*totalMallocLen)) { + (*totalMallocLen) *= 2; + } + + pMultiMeta = realloc(pMultiMeta, *totalMallocLen); + if (pMultiMeta == NULL) { + return NULL; + } + } + + return pMultiMeta; +} + static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { SMultiTableInfoMsg *pInfo = pMsg->rpcMsg.pCont; pInfo->numOfTables = htonl(pInfo->numOfTables); pInfo->numOfVgroups = htonl(pInfo->numOfVgroups); + pInfo->numOfUdfs = htonl(pInfo->numOfUdfs); int32_t contLen = pMsg->rpcMsg.contLen - sizeof(SMultiTableInfoMsg); @@ -2873,17 +2948,17 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { char* str = strndup(pInfo->tableNames, contLen); char** nameList = strsplit(str, ",", &num); SArray* pList = taosArrayInit(4, POINTER_BYTES); - SMultiTableMeta *pMultiMeta = NULL; - if (num != pInfo->numOfTables + pInfo->numOfVgroups) { + SMultiTableMeta *pMultiMeta = NULL; + if (num != pInfo->numOfTables + pInfo->numOfVgroups + pInfo->numOfUdfs) { mError("msg:%p, app:%p, failed to get multi-tableMeta, msg inconsistent", pMsg, pMsg->rpcMsg.ahandle); code = TSDB_CODE_MND_INVALID_TABLE_NAME; goto _end; } // first malloc 80KB, subsequent reallocation will expand the size as twice of the original size - int32_t totalMallocLen = sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16); - pMultiMeta = rpcMallocCont(totalMallocLen); + int32_t totalMallocLen = sizeof(SMultiTableMeta) + sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16); + pMultiMeta = calloc(1, 
totalMallocLen); if (pMultiMeta == NULL) { code = TSDB_CODE_MND_OUT_OF_MEMORY; goto _end; @@ -2916,7 +2991,7 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { int remain = totalMallocLen - pMultiMeta->contLen; if (remain <= sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16)) { totalMallocLen *= 2; - pMultiMeta = rpcReallocCont(pMultiMeta, totalMallocLen); + pMultiMeta = realloc(pMultiMeta, totalMallocLen); if (pMultiMeta == NULL) { mnodeDecTableRef(pMsg->pTable); code = TSDB_CODE_MND_OUT_OF_MEMORY; @@ -2950,10 +3025,10 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { } } - char* msg = (char*) pMultiMeta + pMultiMeta->contLen; + int32_t tableNum = pInfo->numOfTables + pInfo->numOfVgroups; // add the additional super table names that needs the vgroup info - for(;t < num; ++t) { + for(;t < tableNum; ++t) { taosArrayPush(pList, &nameList[t]); } @@ -2961,6 +3036,13 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { int32_t numOfVgroupList = (int32_t) taosArrayGetSize(pList); pMultiMeta->numOfVgroup = htonl(numOfVgroupList); + pMultiMeta = ensureMsgBufferSpace(pMultiMeta, pList, &totalMallocLen, numOfVgroupList); + if (pMultiMeta == NULL) { + code = TSDB_CODE_MND_OUT_OF_MEMORY; + goto _end; + } + + char* msg = (char*) pMultiMeta + pMultiMeta->contLen; for(int32_t i = 0; i < numOfVgroupList; ++i) { char* name = taosArrayGetP(pList, i); @@ -2977,20 +3059,77 @@ static int32_t mnodeProcessMultiTableMetaMsg(SMnodeMsg *pMsg) { pMultiMeta->contLen = (int32_t) (msg - (char*) pMultiMeta); pMultiMeta->numOfTables = htonl(pMultiMeta->numOfTables); + + // add the user-defined-function information + for(int32_t i = 0; i < pInfo->numOfUdfs; ++i, ++t) { + char buf[TSDB_FUNC_NAME_LEN] = {0}; + strcpy(buf, nameList[t]); + + SFuncObj* pFuncObj = mnodeGetFunc(buf); + if (pFuncObj == NULL) { + mError("function %s does not exist", buf); + code = TSDB_CODE_MND_INVALID_FUNC; + goto _end; + } + + SFunctionInfoMsg* pFuncInfo = (SFunctionInfoMsg*) msg; + + strcpy(pFuncInfo->name, buf); + pFuncInfo->len = htonl(pFuncObj->contLen); + memcpy(pFuncInfo->content, pFuncObj->cont, pFuncObj->contLen); + + pFuncInfo->funcType = htonl(pFuncObj->funcType); + pFuncInfo->resType = pFuncObj->resType; + pFuncInfo->resBytes = htons(pFuncObj->resBytes); + pFuncInfo->bufSize = htonl(pFuncObj->bufSize); + + msg += sizeof(SFunctionInfoMsg) + pFuncObj->contLen; + } + + pMultiMeta->contLen = (int32_t) (msg - (char*) pMultiMeta); + pMultiMeta->numOfUdf = htonl(pInfo->numOfUdfs); + pMsg->rpcRsp.rsp = pMultiMeta; pMsg->rpcRsp.len = pMultiMeta->contLen; code = TSDB_CODE_SUCCESS; + char* tmp = rpcMallocCont(pMultiMeta->contLen + 2); + if (tmp == NULL) { + code = TSDB_CODE_MND_OUT_OF_MEMORY; + goto _end; + } + + int32_t dataLen = (int32_t)pMultiMeta->contLen - sizeof(SMultiTableMeta); + int32_t len = tsCompressString(pMultiMeta->meta, dataLen, 1, tmp + sizeof(SMultiTableMeta), (int32_t)dataLen + 2, + ONE_STAGE_COMP, NULL, 0); + + pMultiMeta->metaClone = pInfo->metaClone; + pMultiMeta->rawLen = pMultiMeta->contLen; + if (len == -1 || len >= dataLen + 2) { // compress failed, do not compress this binary data + pMultiMeta->compressed = 0; + memcpy(tmp, pMultiMeta, sizeof(SMultiTableMeta) + pMultiMeta->contLen); + } else { + pMultiMeta->compressed = 1; + pMultiMeta->contLen = sizeof(SMultiTableMeta) + len; + + // copy the header and the compressed payload + memcpy(tmp, pMultiMeta, sizeof(SMultiTableMeta)); + } + + pMsg->rpcRsp.rsp = tmp; + pMsg->rpcRsp.len = 
pMultiMeta->contLen; + + SMultiTableMeta* p = (SMultiTableMeta*) tmp; + + mDebug("multiTable info build completed, original:%d, compressed:%d, comp:%d", p->rawLen, p->contLen, p->compressed); + _end: tfree(str); tfree(nameList); taosArrayDestroy(pList); pMsg->pTable = NULL; pMsg->pVgroup = NULL; - - if (code != TSDB_CODE_SUCCESS) { - rpcFreeCont(pMultiMeta); - } + tfree(pMultiMeta); return code; } @@ -3086,15 +3225,9 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows char prefix[64] = {0}; int32_t prefixLen = (int32_t)tableIdPrefix(pDb->name, prefix, 64); - char* pattern = NULL; - if (pShow->payloadLen > 0) { - pattern = (char*)malloc(pShow->payloadLen + 1); - if (pattern == NULL) { - terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; - return 0; - } - memcpy(pattern, pShow->payload, pShow->payloadLen); - pattern[pShow->payloadLen] = 0; + char* pattern = mnodeGetTableShowPattern(pShow); + if (pShow->payloadLen > 0 && pattern == NULL) { + return 0; } while (numOfRows < rows) { @@ -3326,6 +3459,11 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro strcat(prefix, TS_PATH_DELIMITER); int32_t prefixLen = (int32_t)strlen(prefix); + char* pattern = mnodeGetTableShowPattern(pShow); + if (pShow->payloadLen > 0 && pattern == NULL) { + return 0; + } + while (numOfRows < rows) { pShow->pIter = mnodeGetNextChildTable(pShow->pIter, &pTable); if (pTable == NULL) break; @@ -3341,7 +3479,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro // pattern compare for table name mnodeExtractTableName(pTable->info.tableId, tableName); - if (pShow->payloadLen > 0 && patternMatch(pShow->payload, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) { + if (pShow->payloadLen > 0 && patternMatch(pattern, tableName, sizeof(tableName) - 1, &info) != TSDB_PATTERN_MATCH) { mnodeDecTableRef(pTable); continue; } @@ -3373,6 +3511,7 @@ static int32_t mnodeRetrieveStreamTables(SShowObj *pShow, char *data, int32_t ro mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow); mnodeDecDbRef(pDb); + free(pattern); return numOfRows; } diff --git a/src/os/CMakeLists.txt b/src/os/CMakeLists.txt index 4472c683c70f0e4463ae46c63aaff7c7c7ba0fd6..a64c9d79dd6af511448ad0f9b186f6e50d59c728 100644 --- a/src/os/CMakeLists.txt +++ b/src/os/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 6731ca6d7db9ce72e72a88a1b9dadf76fb8ec87e..e91edaab0f4e68cd0e39b677683fbe4b48eeaa3e 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -29,7 +29,7 @@ extern "C" { #include "osMath.h" #include "osMemory.h" #include "osRand.h" -#include "osSemphone.h" +#include "osSemaphore.h" #include "osSignal.h" #include "osSleep.h" #include "osSocket.h" @@ -37,6 +37,7 @@ extern "C" { #include "osSysinfo.h" #include "osTime.h" #include "osTimer.h" +#include "osSystem.h" void osInit(); diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/os/inc/osDef.h b/src/os/inc/osDef.h index 80617eec56e6a30e96b2bb0c801902e5b6b33cf6..54a4f98254039761d3486267ab2081ac77444489 100644 --- a/src/os/inc/osDef.h +++ b/src/os/inc/osDef.h @@ -132,10 
+132,9 @@ extern "C" { #define ASSERT(x) #endif -#ifdef UNUSED -#undefine UNUSED -#endif +#ifndef UNUSED #define UNUSED(x) ((void)(x)) +#endif #ifdef UNUSED_FUNC #undefine UNUSED_FUNC @@ -211,6 +210,25 @@ extern "C" { #define PRIzu "zu" #endif + +#if defined(_TD_LINUX_64) || defined(_TD_LINUX_32) || defined(_TD_MIPS_64) || defined(_TD_ARM_32) || defined(_TD_ARM_64) || defined(_TD_DARWIN_64) + #if defined(_TD_DARWIN_64) + // MacOS + #if !defined(_GNU_SOURCE) + #define setThreadName(name) do { pthread_setname_np((name)); } while (0) + #else + // pthread_setname_np not defined + #define setThreadName(name) + #endif + #else + // Linux, length of name must <= 16 (the last '\0' included) + #define setThreadName(name) do { prctl(PR_SET_NAME, (name)); } while (0) + #endif +#else + // Windows + #define setThreadName(name) +#endif + #ifdef __cplusplus } #endif diff --git a/src/os/inc/osFile.h b/src/os/inc/osFile.h index 262f19ad22c603f02c8e4277761a0ac038a31c7d..86c08c0a3aca1f2e8434aee84c819d0c17060494 100644 --- a/src/os/inc/osFile.h +++ b/src/os/inc/osFile.h @@ -53,8 +53,24 @@ int64_t taosFSendFile(FILE *outfile, FILE *infile, int64_t *offset, int64_t size void taosGetTmpfilePath(const char *fileNamePrefix, char *dstPath); void taosClose(FileFd fd); +#ifdef TAOS_RANDOM_FILE_FAIL + void taosSetRandomFileFailFactor(int32_t factor); + void taosSetRandomFileFailOutput(const char *path); + #ifdef TAOS_RANDOM_FILE_FAIL_TEST + int64_t taosReadFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line); + int64_t taosWriteFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line); + int64_t taosLSeekRandomFail(int32_t fd, int64_t offset, int32_t whence, const char *file, uint32_t line); + #undef taosRead + #undef taosWrite + #undef taosLSeek + #define taosRead(fd, buf, count) taosReadFileRandomFail(fd, buf, count, __FILE__, __LINE__) + #define taosWrite(fd, buf, count) taosWriteFileRandomFail(fd, buf, count, __FILE__, __LINE__) + #define taosLSeek(fd, offset, whence) taosLSeekRandomFail(fd, offset, whence, __FILE__, __LINE__) + #endif +#endif + #ifdef __cplusplus } #endif -#endif \ No newline at end of file +#endif diff --git a/src/os/inc/osInc.h b/src/os/inc/osInc.h index 6a3c754dcc5ff359501bb744886d8c1f216d48c2..9b78110833e73274f82d051cbaa6fd35c90f2a08 100644 --- a/src/os/inc/osInc.h +++ b/src/os/inc/osInc.h @@ -76,7 +76,8 @@ extern "C" { #include #include "osEok.h" #else - #include + #include + #include #include #include #include @@ -84,6 +85,7 @@ extern "C" { #include #include #include + #include #if !(defined(_ALPINE)) #include diff --git a/src/os/inc/osLinux32.h b/src/os/inc/osLinux32.h new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/os/inc/osLinux64.h b/src/os/inc/osLinux64.h new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemaphore.h similarity index 97% rename from src/os/inc/osSemphone.h rename to src/os/inc/osSemaphore.h index fe59095205010bef553413809706c62cd772a7e3..10d14700e013f66e6d98208f0e65fe1ca5fc3874 100644 --- a/src/os/inc/osSemphone.h +++ b/src/os/inc/osSemaphore.h @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#ifndef TDENGINE_OS_SEMPHONE_H -#define TDENGINE_OS_SEMPHONE_H +#ifndef TDENGINE_OS_SEMAPHORE_H +#define TDENGINE_OS_SEMAPHORE_H #ifdef __cplusplus extern "C" { diff --git a/src/os/inc/osSysinfo.h b/src/os/inc/osSysinfo.h index d136f9664c9d5c00dc68382eaf3ebc6e97013cd0..5f0bc2950c1a7b0e9d2caf57c19f1913c7035980 100644 --- a/src/os/inc/osSysinfo.h +++ b/src/os/inc/osSysinfo.h @@ -27,18 +27,20 @@ typedef struct { } SysDiskSize; int32_t taosGetDiskSize(char *dataDir, SysDiskSize *diskSize); -void taosGetSystemInfo(); -bool taosGetProcIO(float *readKB, float *writeKB); -bool taosGetBandSpeed(float *bandSpeedKb); -void taosGetDisk(); -bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage); -bool taosGetProcMemory(float *memoryUsedMB); -bool taosGetSysMemory(float *memoryUsedMB); -void taosPrintOsInfo(); -int taosSystem(const char *cmd); -void taosKillSystem(); -bool taosGetSystemUid(char *uid); -char * taosGetCmdlineByPID(int pid); + +void taosGetSystemInfo(); +bool taosGetProcIO(float *readKB, float *writeKB); +bool taosGetBandSpeed(float *bandSpeedKb); +void taosGetDisk(); +bool taosGetCpuUsage(float *sysCpuUsage, float *procCpuUsage) ; +bool taosGetProcMemory(float *memoryUsedMB) ; +bool taosGetSysMemory(float *memoryUsedMB); +void taosPrintOsInfo(); +void taosPrintDiskInfo(); +int taosSystem(const char * cmd) ; +void taosKillSystem(); +bool taosGetSystemUid(char *uid); +char *taosGetCmdlineByPID(int pid); void taosSetCoreDump(); diff --git a/src/os/inc/osSystem.h b/src/os/inc/osSystem.h new file mode 100644 index 0000000000000000000000000000000000000000..e7a3ec13ae1b3ff9a94724a4cd28e2b4bb40ac1c --- /dev/null +++ b/src/os/inc/osSystem.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_OS_SYSTEM_H +#define TDENGINE_OS_SYSTEM_H + +#ifdef __cplusplus +extern "C" { +#endif + +void* taosLoadDll(const char *filename); +void* taosLoadSym(void* handle, char* name); +void taosCloseDll(void *handle); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/os/src/darwin/CMakeLists.txt b/src/os/src/darwin/CMakeLists.txt index 259e1a7a0b56a02b7d67825acc85caef5b598089..ed75cac03da112348ff153005d5330786f6386ac 100644 --- a/src/os/src/darwin/CMakeLists.txt +++ b/src/os/src/darwin/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/darwin/darwinSystem.c b/src/os/src/darwin/darwinSystem.c new file mode 100644 index 0000000000000000000000000000000000000000..17cafdd6644ed2adbb0a402689f9f300fa615d03 --- /dev/null +++ b/src/os/src/darwin/darwinSystem.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tglobal.h" +#include "tulog.h" + +void* taosLoadDll(const char *filename) { + return NULL; +} + +void* taosLoadSym(void* handle, char* name) { + return NULL; +} + +void taosCloseDll(void *handle) { +} + + diff --git a/src/os/src/darwin/dwSemphone.c b/src/os/src/darwin/dwSemaphore.c similarity index 99% rename from src/os/src/darwin/dwSemphone.c rename to src/os/src/darwin/dwSemaphore.c index 898410647ad6e23428656f6f820504126268af07..25cb28cff1b6f9c83cab43faf68641717450c0ea 100644 --- a/src/os/src/darwin/dwSemphone.c +++ b/src/os/src/darwin/dwSemaphore.c @@ -41,6 +41,8 @@ static semaphore_t sem_exit; static void* sem_thread_routine(void *arg) { (void)arg; + setThreadName("sem_thrd"); + sem_port = mach_task_self(); kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0); if (ret != KERN_SUCCESS) { diff --git a/src/os/src/darwin/dwSysInfo.c b/src/os/src/darwin/dwSysInfo.c index b3c9bd528e9459a8d5798a2ff6ca4a1664503a90..10e0acc1309d80154a42b6cc948a8afb7a04b668 100644 --- a/src/os/src/darwin/dwSysInfo.c +++ b/src/os/src/darwin/dwSysInfo.c @@ -136,9 +136,6 @@ void taosPrintOsInfo() { // uInfo(" os openMax: %" PRId64, tsOpenMax); // uInfo(" os streamMax: %" PRId64, tsStreamMax); uInfo(" os numOfCores: %d", tsNumOfCores); - uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); - uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); - uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); struct utsname buf; @@ -154,6 +151,14 @@ void taosPrintOsInfo() { uInfo("=================================="); } +void taosPrintDiskInfo() { + uInfo("=================================="); + uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); + uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); + uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); + uInfo("=================================="); +} + void taosKillSystem() { uError("function taosKillSystem, exit!"); exit(0); diff --git a/src/os/src/darwin/dwTimer.c b/src/os/src/darwin/dwTimer.c index ee1becc91af5237296bb2a562982b33cc5c78f10..d395a7f53f2baa9a806ab8cbe96253deb7334f6e 100644 --- a/src/os/src/darwin/dwTimer.c +++ b/src/os/src/darwin/dwTimer.c @@ -32,6 +32,7 @@ static volatile int timer_stop = 0; static void* timer_routine(void *arg) { (void)arg; + setThreadName("timer"); int r = 0; struct timespec to = {0}; diff --git a/src/os/src/detail/CMakeLists.txt b/src/os/src/detail/CMakeLists.txt index 5c49df24c1c7b1e88c0ba206f2d100fe90ed21c6..ac68cf4cd8cbd217da8aa2d4a41a5aa159562868 100644 --- a/src/os/src/detail/CMakeLists.txt +++ b/src/os/src/detail/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(.) 
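The setThreadName("sem_thrd") and setThreadName("timer") calls added above use the portability macro introduced in osDef.h earlier in this patch: on Linux it expands to prctl(PR_SET_NAME, name) and the name must fit in 16 bytes including the terminating '\0', on macOS it maps to pthread_setname_np(name) when that function is available, and on Windows it is currently a no-op. A minimal Linux-only sketch of the intended usage pattern follows; the worker_routine and "demoWorker" names are illustrative only, not part of the patch.

/*
 * Sketch only: in the real tree the macro comes from os.h/osDef.h; its Linux
 * form is reproduced here so the example is self-contained.
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/prctl.h>

#define setThreadName(name) do { prctl(PR_SET_NAME, (name)); } while (0)

static void *worker_routine(void *arg) {
  (void)arg;
  /* at most 15 visible chars; shows up in /proc/<pid>/task/<tid>/comm and in `top -H` */
  setThreadName("demoWorker");
  puts("worker running");
  return NULL;
}

int main(void) {
  pthread_t tid;
  pthread_create(&tid, NULL, worker_routine, NULL);
  pthread_join(tid, NULL);
  return 0;
}

Giving each long-running thread a short name is what makes the later additions in this patch (httpData, httpAcceptConn, httpResultQ, monThrd, mqttCliRefresh, alarmSignal) easy to tell apart in ps -L or top -H while debugging.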
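The new osSystem.h header above declares a small dynamic-loading wrapper (taosLoadDll / taosLoadSym / taosCloseDll); the Darwin and Windows versions added in this patch are stubs that return NULL, while the Linux version further down implements them with dlopen/dlsym/dlclose. A self-contained sketch of how such a wrapper is typically driven is shown below; libm and the "cos" symbol are just convenient stand-ins for illustration, not something this patch actually loads.

#include <dlfcn.h>
#include <stdio.h>

/* Minimal stand-ins for the taosLoadDll/taosLoadSym/taosCloseDll trio,
 * assuming the dlopen-based Linux behaviour added in this patch. */
static void *loadDll(const char *filename) { return dlopen(filename, RTLD_LAZY); }
static void *loadSym(void *handle, const char *name) { return dlsym(handle, name); }
static void  closeDll(void *handle) { if (handle) dlclose(handle); }

int main(void) {
  void *h = loadDll("libm.so.6");  /* any shared object works for the demo */
  if (h == NULL) {
    fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }

  double (*cosine)(double) = (double (*)(double))loadSym(h, "cos");
  if (cosine != NULL) {
    printf("cos(0) = %f\n", cosine(0.0));  /* prints 1.000000 */
  }

  closeDll(h);
  return 0;
}

The same load-handle-then-resolve-symbols pattern is what lets the UDF machinery introduced later in this diff (see qUdf.h) keep a loaded handle plus an array of resolved function pointers per user-defined function.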
diff --git a/src/os/src/detail/osFail.c b/src/os/src/detail/osFail.c index a99bcd01dbccd0290de1fc38a1220b573ab74b22..6ddeefc521264ac23d2fdf2961e20bc8c85a43b1 100644 --- a/src/os/src/detail/osFail.c +++ b/src/os/src/detail/osFail.c @@ -113,7 +113,7 @@ int64_t taosReadFileRandomFail(int32_t fd, void *buf, int32_t count, const char } } - return taosReadImp(fd, buf, count); + return taosRead(fd, buf, count); } int64_t taosWriteFileRandomFail(int32_t fd, void *buf, int32_t count, const char *file, uint32_t line) { @@ -124,7 +124,7 @@ int64_t taosWriteFileRandomFail(int32_t fd, void *buf, int32_t count, const char } } - return taosWriteImp(fd, buf, count); + return taosWrite(fd, buf, count); } int64_t taosLSeekRandomFail(int32_t fd, int64_t offset, int32_t whence, const char *file, uint32_t line) { @@ -135,7 +135,7 @@ int64_t taosLSeekRandomFail(int32_t fd, int64_t offset, int32_t whence, const ch } } - return taosLSeekImp(fd, offset, whence); + return taosLSeek(fd, offset, whence); } #endif //TAOS_RANDOM_FILE_FAIL diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemaphore.c similarity index 100% rename from src/os/src/detail/osSemphone.c rename to src/os/src/detail/osSemaphore.c diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c index d0b284e1cab6717ab7b589557e4545ea744efa33..04b1efe7bf78d57a8806960995b0e34ff79e3abb 100644 --- a/src/os/src/detail/osSysinfo.c +++ b/src/os/src/detail/osSysinfo.c @@ -74,13 +74,14 @@ bool taosGetProcMemory(float *memoryUsedMB) { return false; } + ssize_t _bytes = 0; size_t len; char * line = NULL; while (!feof(fp)) { tfree(line); len = 0; - getline(&line, &len, fp); - if (line == NULL) { + _bytes = getline(&line, &len, fp); + if ((_bytes < 0) || (line == NULL)) { break; } if (strstr(line, "VmRSS:") != NULL) { @@ -113,8 +114,8 @@ static bool taosGetSysCpuInfo(SysCpuInfo *cpuInfo) { size_t len; char * line = NULL; - getline(&line, &len, fp); - if (line == NULL) { + ssize_t _bytes = getline(&line, &len, fp); + if ((_bytes < 0) || (line == NULL)) { uError("read file:%s failed", tsSysCpuFile); fclose(fp); return false; @@ -138,8 +139,8 @@ static bool taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) { size_t len = 0; char * line = NULL; - getline(&line, &len, fp); - if (line == NULL) { + ssize_t _bytes = getline(&line, &len, fp); + if ((_bytes < 0) || (line == NULL)) { uError("read file:%s failed", tsProcCpuFile); fclose(fp); return false; @@ -339,6 +340,7 @@ static bool taosGetCardInfo(int64_t *bytes) { return false; } + ssize_t _bytes = 0; size_t len = 2048; char * line = calloc(1, len); @@ -357,7 +359,12 @@ static bool taosGetCardInfo(int64_t *bytes) { int64_t nouse6 = 0; char nouse0[200] = {0}; - getline(&line, &len, fp); + _bytes = getline(&line, &len, fp); + if (_bytes < 0) + { + break; + } + line[len - 1] = 0; if (strstr(line, "lo:") != NULL) { @@ -420,6 +427,7 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { return false; } + ssize_t _bytes = 0; size_t len; char * line = NULL; char tmp[10]; @@ -428,8 +436,8 @@ static bool taosReadProcIO(int64_t *readbyte, int64_t *writebyte) { while (!feof(fp)) { tfree(line); len = 0; - getline(&line, &len, fp); - if (line == NULL) { + _bytes = getline(&line, &len, fp); + if ((_bytes < 0) || (line == NULL)) { break; } if (strstr(line, "rchar:") != NULL) { @@ -506,9 +514,6 @@ void taosPrintOsInfo() { uInfo(" os openMax: %" PRId64, tsOpenMax); uInfo(" os streamMax: %" PRId64, tsStreamMax); uInfo(" os numOfCores: %d", tsNumOfCores); - uInfo(" os totalDisk: %f(GB)", 
tsTotalDataDirGB); - uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); - uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); struct utsname buf; @@ -523,6 +528,14 @@ void taosPrintOsInfo() { uInfo(" os machine: %s", buf.machine); } +void taosPrintDiskInfo() { + uInfo("=================================="); + uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); + uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); + uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); + uInfo("=================================="); +} + void taosKillSystem() { // SIGINT uInfo("taosd will shut down soon"); diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c index 4d64e14d9415a256b9ef5032db3de35bcfeedb79..847d484d9e28fd11d1df3c3b2905afcf257174dd 100644 --- a/src/os/src/detail/osTime.c +++ b/src/os/src/detail/osTime.c @@ -348,6 +348,7 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec {1.0 / 1000000, 1.0 / 1000, 1.} }; return (int64_t)((double)time * factors[fromPrecision][toPrecision]); } + static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) { switch (unit) { diff --git a/src/os/src/detail/osTimer.c b/src/os/src/detail/osTimer.c index b054f08c7842ed405f818b26be6040c543fa7644..c381b3e825f508b17ec0d6a053a574957c5dc365 100644 --- a/src/os/src/detail/osTimer.c +++ b/src/os/src/detail/osTimer.c @@ -38,6 +38,8 @@ static void *taosProcessAlarmSignal(void *tharg) { struct sigevent sevent = {{0}}; + setThreadName("alarmSignal"); + #ifdef _ALPINE sevent.sigev_notify = SIGEV_THREAD; sevent.sigev_value.sival_int = syscall(__NR_gettid); diff --git a/src/os/src/linux/CMakeLists.txt b/src/os/src/linux/CMakeLists.txt index 08b696ba1aedb80ecc17997811591fea1209f1ae..f60c10b65a004735e4b76f5d170a65afc6508c36 100644 --- a/src/os/src/linux/CMakeLists.txt +++ b/src/os/src/linux/CMakeLists.txt @@ -1,7 +1,7 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) ADD_LIBRARY(oslinux ${SRC}) -TARGET_LINK_LIBRARIES(oslinux m rt z) \ No newline at end of file +TARGET_LINK_LIBRARIES(oslinux m rt z dl) \ No newline at end of file diff --git a/src/os/src/linux/linuxEnv.c b/src/os/src/linux/linuxEnv.c index 417513314c7013a3e707999bfbd7f9dbd1a4baa8..b7b268b19e6b6f92babb74cfd3f23793be037cd0 100644 --- a/src/os/src/linux/linuxEnv.c +++ b/src/os/src/linux/linuxEnv.c @@ -25,6 +25,13 @@ void osInit() { strcpy(tsDataDir, "/var/lib/power"); strcpy(tsLogDir, "/var/log/power"); strcpy(tsScriptDir, "/etc/power"); +#elif (_TD_TQ_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "/etc/tq"); + } + strcpy(tsDataDir, "/var/lib/tq"); + strcpy(tsLogDir, "/var/log/tq"); + strcpy(tsScriptDir, "/etc/tq"); #else if (configDir[0] == 0) { strcpy(configDir, "/etc/taos"); diff --git a/src/os/src/linux/osSystem.c b/src/os/src/linux/osSystem.c new file mode 100644 index 0000000000000000000000000000000000000000..052b7a22a8e9fe5dafffc4a90c8efa3643e4a06c --- /dev/null +++ b/src/os/src/linux/osSystem.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tconfig.h" +#include "tglobal.h" +#include "tulog.h" + +void* taosLoadDll(const char *filename) { + void *handle = dlopen (filename, RTLD_LAZY); + if (!handle) { + uError("load dll:%s failed, error:%s", filename, dlerror()); + return NULL; + } + + uDebug("dll %s loaded", filename); + + return handle; +} + +void* taosLoadSym(void* handle, char* name) { + void* sym = dlsym(handle, name); + char* error = NULL; + + if ((error = dlerror()) != NULL) { + uWarn("load sym:%s failed, error:%s", name, dlerror()); + return NULL; + } + + uDebug("sym %s loaded", name) + + return sym; +} + +void taosCloseDll(void *handle) { + if (handle) { + dlclose(handle); + } +} + + diff --git a/src/os/src/windows/CMakeLists.txt b/src/os/src/windows/CMakeLists.txt index e5472e1abd618f27d119efbb926a959bf8c737c6..83012d6e3e5a2e11655f4a1c0742cdd25cccddf2 100644 --- a/src/os/src/windows/CMakeLists.txt +++ b/src/os/src/windows/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) AUX_SOURCE_DIRECTORY(. SRC) diff --git a/src/os/src/windows/wEnv.c b/src/os/src/windows/wEnv.c index 19351eb7c964a4c2a8a4d1d5d4d1c8ec669908dc..b35cb8f040aec5ff4b4fb12665d0842e72958ba1 100644 --- a/src/os/src/windows/wEnv.c +++ b/src/os/src/windows/wEnv.c @@ -31,7 +31,14 @@ void osInit() { strcpy(tsDataDir, "C:/PowerDB/data"); strcpy(tsLogDir, "C:/PowerDB/log"); strcpy(tsScriptDir, "C:/PowerDB/script"); - +#elif (_TD_TQ_ == true) + if (configDir[0] == 0) { + strcpy(configDir, "C:/TQ/cfg"); + } + strcpy(tsVnodeDir, "C:/TQ/data"); + strcpy(tsDataDir, "C:/TQ/data"); + strcpy(tsLogDir, "C:/TQ/log"); + strcpy(tsScriptDir, "C:/TQ/script"); #else if (configDir[0] == 0) { strcpy(configDir, "C:/TDengine/cfg"); @@ -48,9 +55,10 @@ void osInit() { strcpy(tsOsName, "Windows"); const char *tmpDir = getenv("tmp"); - if (tmpDir != NULL) { + if (tmpDir == NULL) { tmpDir = getenv("temp"); } + if (tmpDir != NULL) { strcpy(tsTempDir, tmpDir); } else { diff --git a/src/os/src/windows/wSemphone.c b/src/os/src/windows/wSemaphore.c similarity index 100% rename from src/os/src/windows/wSemphone.c rename to src/os/src/windows/wSemaphore.c diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c index 8a81e3079a17e013372ebd7de0facb6b49a99c7b..72793a1049506fed0fce2d1a04c576097fec9fba 100644 --- a/src/os/src/windows/wSysinfo.c +++ b/src/os/src/windows/wSysinfo.c @@ -205,10 +205,15 @@ void taosGetSystemInfo() { void taosPrintOsInfo() { uInfo(" os numOfCores: %d", tsNumOfCores); + uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); + uInfo("=================================="); +} + +void taosPrintDiskInfo() { + uInfo("=================================="); uInfo(" os totalDisk: %f(GB)", tsTotalDataDirGB); uInfo(" os usedDisk: %f(GB)", tsUsedDataDirGB); uInfo(" os availDisk: %f(GB)", tsAvailDataDirGB); - uInfo(" os totalMemory: %d(MB)", tsTotalMemoryMB); uInfo("=================================="); } diff --git a/src/os/src/windows/wSystem.c b/src/os/src/windows/wSystem.c new file mode 100644 index 0000000000000000000000000000000000000000..17cafdd6644ed2adbb0a402689f9f300fa615d03 --- /dev/null +++ 
b/src/os/src/windows/wSystem.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tglobal.h" +#include "tulog.h" + +void* taosLoadDll(const char *filename) { + return NULL; +} + +void* taosLoadSym(void* handle, char* name) { + return NULL; +} + +void taosCloseDll(void *handle) { +} + + diff --git a/src/os/tests/CMakeLists.txt b/src/os/tests/CMakeLists.txt index b00f0ebdc85a24974fd36228ff69ced16a32b76d..3c477641899994bf34237e93122c3d83f0365fad 100644 --- a/src/os/tests/CMakeLists.txt +++ b/src/os/tests/CMakeLists.txt @@ -1,10 +1,11 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) -FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) +FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) +FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) -IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) +IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build unit test") # GoogleTest requires at least C++11 @@ -17,4 +18,4 @@ IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) ADD_EXECUTABLE(osTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(osTest taos os tutil common gtest pthread) -ENDIF() \ No newline at end of file +ENDIF() diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index 7dcaaf27e615ead75e83630788288a27e938b0a9..320445f7f784884f8aa009e37182fc57a38bb96f 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) ADD_SUBDIRECTORY(monitor) diff --git a/src/plugins/http/CMakeLists.txt b/src/plugins/http/CMakeLists.txt index bfb47ad12e8b1ef7099109ecf5849ec3575caf5f..57fc2ee3a2692c239d7fa36d6e55ddae738a2720 100644 --- a/src/plugins/http/CMakeLists.txt +++ b/src/plugins/http/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/zlib-1.2.11/inc) diff --git a/src/plugins/http/inc/httpParser.h b/src/plugins/http/inc/httpParser.h index c7cf7a9608bf1c25fa3fdec16810ff6df07d28f8..f7b55958c88bf9961a6a57e14aa2de286d004ece 100644 --- a/src/plugins/http/inc/httpParser.h +++ b/src/plugins/http/inc/httpParser.h @@ -17,7 +17,7 @@ #define HTTP_PARSER_H #include "httpGzip.h" -#define HTTP_MAX_URL 5 // http url stack size +#define HTTP_MAX_URL 6 // http url stack size #define HTTP_CODE_CONTINUE 100 #define HTTP_CODE_SWITCHING_PROTOCOL 101 @@ -100,6 +100,7 @@ typedef enum HTTP_PARSER_STATE { HTTP_PARSER_CHUNK, HTTP_PARSER_END, HTTP_PARSER_ERROR, + HTTP_PARSER_OPTIONAL_SP } HTTP_PARSER_STATE; typedef enum HTTP_AUTH_TYPE { diff --git a/src/plugins/http/inc/httpSql.h 
b/src/plugins/http/inc/httpSql.h index db3e3a3b163a249aa675839f30db4d14411eb6d6..325545af47aa5dc8810eee0172f197f14b19c442 100644 --- a/src/plugins/http/inc/httpSql.h +++ b/src/plugins/http/inc/httpSql.h @@ -35,4 +35,7 @@ void httpTrimTableName(char *name); int32_t httpShrinkTableName(HttpContext *pContext, int32_t pos, char *name); char * httpGetCmdsString(HttpContext *pContext, int32_t pos); +int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql); +void httpCheckFreeEscapedSql(char *oldSql, char *newSql); + #endif diff --git a/src/plugins/http/src/httpGcHandle.c b/src/plugins/http/src/httpGcHandle.c index 925c74e7cd46751ada1e04873710a329e77bded0..883afcc4ecba7d4959cb6dfe1cc7f8c1d83b7cae 100644 --- a/src/plugins/http/src/httpGcHandle.c +++ b/src/plugins/http/src/httpGcHandle.c @@ -176,6 +176,16 @@ bool gcProcessQueryRequest(HttpContext* pContext) { return false; } +#define ESCAPE_ERROR_PROC(code, context, root) \ + do { \ + if (code != TSDB_CODE_SUCCESS) { \ + httpSendErrorResp(context, code); \ + \ + cJSON_Delete(root); \ + return false; \ + } \ + } while (0) + for (int32_t i = 0; i < size; ++i) { cJSON* query = cJSON_GetArrayItem(root, i); if (query == NULL) continue; @@ -186,7 +196,14 @@ bool gcProcessQueryRequest(HttpContext* pContext) { continue; } - int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, refId->valuestring); + char *newStr = NULL; + int32_t retCode = 0; + + retCode = httpCheckAllocEscapeSql(refId->valuestring, &newStr); + ESCAPE_ERROR_PROC(retCode, pContext, root); + + int32_t refIdBuffer = httpAddToSqlCmdBuffer(pContext, newStr); + httpCheckFreeEscapedSql(refId->valuestring, newStr); if (refIdBuffer == -1) { httpWarn("context:%p, fd:%d, user:%s, refId buffer is full", pContext, pContext->fd, pContext->user); break; @@ -195,7 +212,11 @@ bool gcProcessQueryRequest(HttpContext* pContext) { cJSON* alias = cJSON_GetObjectItem(query, "alias"); int32_t aliasBuffer = -1; if (!(alias == NULL || alias->valuestring == NULL || strlen(alias->valuestring) == 0)) { - aliasBuffer = httpAddToSqlCmdBuffer(pContext, alias->valuestring); + retCode = httpCheckAllocEscapeSql(alias->valuestring, &newStr); + ESCAPE_ERROR_PROC(retCode, pContext, root); + + aliasBuffer = httpAddToSqlCmdBuffer(pContext, newStr); + httpCheckFreeEscapedSql(alias->valuestring, newStr); if (aliasBuffer == -1) { httpWarn("context:%p, fd:%d, user:%s, alias buffer is full", pContext, pContext->fd, pContext->user); break; @@ -211,7 +232,11 @@ bool gcProcessQueryRequest(HttpContext* pContext) { continue; } - int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, sql->valuestring); + retCode = httpCheckAllocEscapeSql(sql->valuestring, &newStr); + ESCAPE_ERROR_PROC(retCode, pContext, root); + + int32_t sqlBuffer = httpAddToSqlCmdBuffer(pContext, newStr); + httpCheckFreeEscapedSql(sql->valuestring, newStr); if (sqlBuffer == -1) { httpWarn("context:%p, fd:%d, user:%s, sql buffer is full", pContext, pContext->fd, pContext->user); break; @@ -237,6 +262,8 @@ bool gcProcessQueryRequest(HttpContext* pContext) { } } +#undef ESCAPE_ERROR_PROC + pContext->reqType = HTTP_REQTYPE_MULTI_SQL; pContext->encodeMethod = &gcQueryMethod; pContext->multiCmds->pos = 0; diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index 397791706d0fc24f250c2332dddc5b0b031a4817..2d361d37940a93c3627ef53883a342d12183e6a1 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -199,7 +199,7 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, for (int32_t i = 
dataFields; i >= 0; i--) { httpJsonItemToken(jsonBuf); - if (row[i] == NULL) { + if (row == NULL || i >= num_fields || row[i] == NULL) { httpJsonOriginString(jsonBuf, "null", 4); continue; } @@ -228,13 +228,11 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, case TSDB_DATA_TYPE_NCHAR: httpJsonStringForTransMean(jsonBuf, (char *)row[i], fields[i].bytes); break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (precision == TSDB_TIME_PRECISION_MILLI) { // ms - httpJsonInt64(jsonBuf, *((int64_t *)row[i])); - } else { - httpJsonInt64(jsonBuf, *((int64_t *)row[i]) / 1000); - } + case TSDB_DATA_TYPE_TIMESTAMP: { + int64_t ts = convertTimePrecision(*((int64_t *)row[i]), precision, TSDB_TIME_PRECISION_MILLI); + httpJsonInt64(jsonBuf, ts); break; + } default: httpJsonString(jsonBuf, "-", 1); break; diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c index df4c4fbc63d469c22ea13273564bec49562f35a9..4bd66a17a3ed3c9f6ba042c8ad96d0ffdfcafb5d 100644 --- a/src/plugins/http/src/httpJson.c +++ b/src/plugins/http/src/httpJson.c @@ -264,8 +264,7 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) { void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { char ts[35] = {0}; - struct tm* ptm; - + int32_t fractionLen; char* format = NULL; time_t quot = 0; @@ -297,11 +296,13 @@ void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { } default: + fractionLen = 0; assert(false); } - ptm = localtime("); - int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm); + struct tm ptm = {0}; + localtime_r(", &ptm); + int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", &ptm); length += snprintf(ts + length, fractionLen, format, mod); httpJsonString(buf, ts, length); @@ -342,6 +343,7 @@ void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) { } default: + fractionLen = 0; assert(false); } diff --git a/src/plugins/http/src/httpParser.c b/src/plugins/http/src/httpParser.c index a68a9bd7ed93cfc8f36fb9b1249a6c7cebf58d6e..ba88a2b9cd95b2b976fb204abbf38b0741dd9b9c 100644 --- a/src/plugins/http/src/httpParser.c +++ b/src/plugins/http/src/httpParser.c @@ -744,6 +744,15 @@ static int32_t httpParserOnSp(HttpParser *parser, HTTP_PARSER_STATE state, const return ok; } +static int32_t httpParserOnOptionalSp(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) { + int32_t ok = 0; + if (c != ' ') { + *again = 1; + httpPopStack(parser); + } + return ok; +} + static int32_t httpParserOnStatusCode(HttpParser *parser, HTTP_PARSER_STATE state, const char c, int32_t *again) { HttpContext *pContext = parser->pContext; int32_t ok = 0; @@ -867,7 +876,7 @@ static int32_t httpParserOnHeader(HttpParser *parser, HTTP_PARSER_STATE state, c } httpPushStack(parser, HTTP_PARSER_CRLF); httpPushStack(parser, HTTP_PARSER_HEADER_VAL); - httpPushStack(parser, HTTP_PARSER_SP); + httpPushStack(parser, HTTP_PARSER_OPTIONAL_SP); httpPushStack(parser, HTTP_PARSER_HEADER_KEY); break; } @@ -1061,6 +1070,10 @@ static int32_t httpParseChar(HttpParser *parser, const char c, int32_t *again) { ok = httpParserOnSp(parser, state, c, again); break; } + if (state == HTTP_PARSER_OPTIONAL_SP) { + ok = httpParserOnOptionalSp(parser, state, c, again); + break; + } if (state == HTTP_PARSER_STATUS_CODE) { ok = httpParserOnStatusCode(parser, state, c, again); break; diff --git a/src/plugins/http/src/httpQueue.c b/src/plugins/http/src/httpQueue.c index 7f7ce404600b96b0efe8385ea0aed7be412a6ba0..677ab0c91d4a6e446eae6dd5213788166b2c40bf 100644 
--- a/src/plugins/http/src/httpQueue.c +++ b/src/plugins/http/src/httpQueue.c @@ -70,6 +70,8 @@ static void *httpProcessResultQueue(void *param) { int32_t type; void * unUsed; + setThreadName("httpResultQ"); + while (1) { if (taosReadQitemFromQset(tsHttpQset, &type, (void **)&pMsg, &unUsed) == 0) { httpDebug("qset:%p, http queue got no message from qset, exiting", tsHttpQset); diff --git a/src/plugins/http/src/httpRestHandle.c b/src/plugins/http/src/httpRestHandle.c index 8728b9e37a1561dd545c6878756dcfbc909a27fd..a285670d20ae1e5a9c8bd6c41971fc57cda3320a 100644 --- a/src/plugins/http/src/httpRestHandle.c +++ b/src/plugins/http/src/httpRestHandle.c @@ -119,6 +119,90 @@ bool restProcessSqlRequest(HttpContext* pContext, int32_t timestampFmt) { return true; } +#define REST_FUNC_URL_POS 2 +#define REST_OUTP_URL_POS 3 +#define REST_AGGR_URL_POS 4 +#define REST_BUFF_URL_POS 5 + +#define HTTP_FUNC_LEN 32 +#define HTTP_OUTP_LEN 16 +#define HTTP_AGGR_LEN 2 +#define HTTP_BUFF_LEN 32 + +static int udfSaveFile(const char *fname, const char *content, long len) { + int fd = open(fname, O_WRONLY | O_CREAT | O_EXCL | O_BINARY, 0755); + if (fd < 0) + return -1; + if (taosWrite(fd, (void *)content, len) < 0) + return -1; + + return 0; +} + +static bool restProcessUdfRequest(HttpContext* pContext) { + HttpParser* pParser = pContext->parser; + if (pParser->path[REST_FUNC_URL_POS].pos >= HTTP_FUNC_LEN || pParser->path[REST_FUNC_URL_POS].pos <= 0) { + return false; + } + + if (pParser->path[REST_OUTP_URL_POS].pos >= HTTP_OUTP_LEN || pParser->path[REST_OUTP_URL_POS].pos <= 0) { + return false; + } + + if (pParser->path[REST_AGGR_URL_POS].pos >= HTTP_AGGR_LEN || pParser->path[REST_AGGR_URL_POS].pos <= 0) { + return false; + } + + if (pParser->path[REST_BUFF_URL_POS].pos >= HTTP_BUFF_LEN || pParser->path[REST_BUFF_URL_POS].pos <= 0) { + return false; + } + + char* sql = pContext->parser->body.str; + int len = pContext->parser->body.pos; + if (sql == NULL) { + httpSendErrorResp(pContext, TSDB_CODE_HTTP_NO_SQL_INPUT); + return false; + } + + char udfDir[256] = {0}; + char buf[64] = "udf-"; + char funcName[64] = {0}; + int aggr = 0; + char outputType[16] = {0}; + char buffSize[32] = {0}; + + tstrncpy(funcName, pParser->path[REST_FUNC_URL_POS].str, HTTP_FUNC_LEN); + tstrncpy(buf + 4, funcName, HTTP_FUNC_LEN); + + if (pParser->path[REST_AGGR_URL_POS].str[0] != '0') { + aggr = 1; + } + + tstrncpy(outputType, pParser->path[REST_OUTP_URL_POS].str, HTTP_OUTP_LEN); + tstrncpy(buffSize, pParser->path[REST_BUFF_URL_POS].str, HTTP_BUFF_LEN); + + taosGetTmpfilePath(funcName, udfDir); + + udfSaveFile(udfDir, sql, len); + + tfree(sql); + pContext->parser->body.str = malloc(1024); + sql = pContext->parser->body.str; + int sqlLen = sprintf(sql, "create %s function %s as \"%s\" outputtype %s bufsize %s", + aggr == 1 ? 
"aggregate" : " ", funcName, udfDir, outputType, buffSize); + + pContext->parser->body.pos = sqlLen; + pContext->parser->body.size = sqlLen + 1; + + HttpSqlCmd* cmd = &(pContext->singleCmd); + cmd->nativSql = sql; + + pContext->reqType = HTTP_REQTYPE_SINGLE_SQL; + pContext->encodeMethod = &restEncodeSqlLocalTimeStringMethod; + + return true; +} + bool restProcessRequest(struct HttpContext* pContext) { if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "login")) { restGetUserFromUrl(pContext); @@ -138,6 +222,8 @@ bool restProcessRequest(struct HttpContext* pContext) { return restProcessSqlRequest(pContext, REST_TIMESTAMP_FMT_UTC_STRING); } else if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "login")) { return restProcessLoginRequest(pContext); + } else if (httpUrlMatch(pContext, REST_ACTION_URL_POS, "udf")) { + return restProcessUdfRequest(pContext); } else { } diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c index 9d98d3f11300fa8143f9a32718c6a865b9a59a97..f02859f165499b0c69b095599dd47890e644c604 100644 --- a/src/plugins/http/src/httpServer.c +++ b/src/plugins/http/src/httpServer.c @@ -117,6 +117,7 @@ static void httpProcessHttpData(void *param) { int32_t fdNum; taosSetMaskSIGPIPE(); + setThreadName("httpData"); while (1) { struct epoll_event events[HTTP_MAX_EVENTS]; @@ -208,6 +209,7 @@ static void *httpAcceptHttpConnection(void *arg) { int32_t totalFds = 0; taosSetMaskSIGPIPE(); + setThreadName("httpAcceptConn"); pServer->fd = taosOpenTcpServerSocket(pServer->serverIp, pServer->serverPort); diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 5a0480b6943d385b94acc0860534db09a5c24353..c2e723732a0f9d786994527c6cd1ac77f273a736 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -423,3 +423,65 @@ void httpProcessRequest(HttpContext *pContext) { httpExecCmd(pContext); } } + +int32_t httpCheckAllocEscapeSql(char *oldSql, char **newSql) +{ + char *pos; + + if (oldSql == NULL || newSql == NULL) { + return TSDB_CODE_SUCCESS; + } + + /* bad sql clause */ + pos = strstr(oldSql, "%%"); + if (pos) { + httpError("bad sql:%s", oldSql); + return TSDB_CODE_HTTP_REQUEST_JSON_ERROR; + } + + pos = strchr(oldSql, '%'); + if (pos == NULL) { + httpDebug("sql:%s", oldSql); + *newSql = oldSql; + return TSDB_CODE_SUCCESS; + } + + *newSql = (char *) calloc(1, (strlen(oldSql) << 1) + 1); + if (newSql == NULL) { + httpError("failed to allocate for new sql, old sql:%s", oldSql); + return TSDB_CODE_HTTP_NO_ENOUGH_MEMORY; + } + + char *src = oldSql; + char *dst = *newSql; + size_t sqlLen = strlen(src); + + while (1) { + memcpy(dst, src, pos - src + 1); + dst += pos - src + 1; + *dst++ = '%'; + + if (pos + 1 >= oldSql + sqlLen) { + break; + } + + src = ++pos; + pos = strchr(pos, '%'); + if (pos == NULL) { + memcpy(dst, src, strlen(src)); + break; + } + } + + return TSDB_CODE_SUCCESS; +} + +void httpCheckFreeEscapedSql(char *oldSql, char *newSql) +{ + if (oldSql && newSql) { + if (oldSql != newSql) { + free(newSql); + } + } +} + diff --git a/src/plugins/http/src/httpTgHandle.c b/src/plugins/http/src/httpTgHandle.c index 69ac3e19c5483d3514d95530404f9a67e6fd11cc..32516b9fd1b5126c4e60a547bb1568d4d063d683 100644 --- a/src/plugins/http/src/httpTgHandle.c +++ b/src/plugins/http/src/httpTgHandle.c @@ -610,7 +610,18 @@ bool tgProcessSingleMetric(HttpContext *pContext, cJSON *metric, char *db) { // stable tag for detail for (int32_t i = 0; i < orderTagsLen; ++i) { cJSON *tag = orderedTags[i]; - stable_cmd->tagNames[i] = 
table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tag->string); + + char *tagStr = NULL; + int32_t retCode = httpCheckAllocEscapeSql(tag->string, &tagStr); + if (retCode != TSDB_CODE_SUCCESS) { + httpSendErrorResp(pContext, retCode); + + return false; + } + + stable_cmd->tagNames[i] = table_cmd->tagNames[i] = httpAddToSqlCmdBuffer(pContext, tagStr); + + httpCheckFreeEscapedSql(tag->string, tagStr); if (tag->type == cJSON_String) stable_cmd->tagValues[i] = table_cmd->tagValues[i] = httpAddToSqlCmdBuffer(pContext, "'%s'", tag->valuestring); diff --git a/src/plugins/monitor/CMakeLists.txt b/src/plugins/monitor/CMakeLists.txt index 28c62a099c0f2bea8b33a57c577bc89c7fb15aaa..8a05d63e141facfe34e740887384fec0337534d4 100644 --- a/src/plugins/monitor/CMakeLists.txt +++ b/src/plugins/monitor/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/plugins/monitor/src/monMain.c b/src/plugins/monitor/src/monMain.c index 2c4a0c1a4c56e560a9ffe5ec007cc51f9d7f9e24..960a097f5d99bb430f888d01960a879e80456d31 100644 --- a/src/plugins/monitor/src/monMain.c +++ b/src/plugins/monitor/src/monMain.c @@ -114,6 +114,7 @@ int32_t monStartSystem() { static void *monThreadFunc(void *param) { monDebug("starting to initialize monitor module ..."); + setThreadName("monThrd"); while (1) { static int32_t accessTimes = 0; diff --git a/src/plugins/mqtt/CMakeLists.txt b/src/plugins/mqtt/CMakeLists.txt index 50b0bbe8af4faeab41a7b041d6aa51747f0aab3e..081512138505ab7e7a54a8bbe770aa293adec0be 100644 --- a/src/plugins/mqtt/CMakeLists.txt +++ b/src/plugins/mqtt/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/plugins/mqtt/src/mqttSystem.c b/src/plugins/mqtt/src/mqttSystem.c index eacc3b1f74cdfc0d00bb53a38beb8c85b164e200..e0f2f393bb10706daadca35715d7e0ae68d7c436 100644 --- a/src/plugins/mqtt/src/mqttSystem.c +++ b/src/plugins/mqtt/src/mqttSystem.c @@ -100,6 +100,8 @@ void mqttPublishCallback(void** unused, struct mqtt_response_publish* published) } void* mqttClientRefresher(void* client) { + setThreadName("mqttCliRefresh"); + while (tsMqttIsRuning) { mqtt_sync((struct mqtt_client*)client); taosMsleep(100); @@ -141,4 +143,4 @@ void mqttReconnectClient(struct mqtt_client* client, void** unused) { mqtt_reinit(client, sockfd, tsMqttStatus.sendbuf, tsMqttStatus.sendbufsz, tsMqttStatus.recvbuf, tsMqttStatus.recvbufsz); mqtt_connect(client, tsMqttClientId, NULL, NULL, 0, tsMqttUser, tsMqttPass, MQTT_CONNECT_CLEAN_SESSION, 400); mqtt_subscribe(client, tsMqttTopic, 0); -} \ No newline at end of file +} diff --git a/src/query/CMakeLists.txt b/src/query/CMakeLists.txt index f23ac7dd86932ba42dde7c2891865f7dff546a00..fd730adee56c3d5edddb943303f5b6b24d9f019c 100644 --- a/src/query/CMakeLists.txt +++ b/src/query/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/tsdb/inc) @@ -8,14 +8,14 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(query ${SRC}) SET_SOURCE_FILES_PROPERTIES(src/sql.c PROPERTIES COMPILE_FLAGS -w) -TARGET_LINK_LIBRARIES(query tsdb tutil) +TARGET_LINK_LIBRARIES(query tsdb tutil lua) IF (TD_LINUX) - TARGET_LINK_LIBRARIES(query m rt) + TARGET_LINK_LIBRARIES(query m rt lua) ADD_SUBDIRECTORY(tests) ENDIF () IF (TD_DARWIN) - TARGET_LINK_LIBRARIES(query m) 
+ TARGET_LINK_LIBRARIES(query m lua) ADD_SUBDIRECTORY(tests) ENDIF () diff --git a/src/query/inc/qAggMain.h b/src/query/inc/qAggMain.h index 57e7d2982f73432b9559c4ec231cf72e9680363e..d4116fbfb2daec9b47c4a891c3c886728e6ca515 100644 --- a/src/query/inc/qAggMain.h +++ b/src/query/inc/qAggMain.h @@ -27,6 +27,7 @@ extern "C" { #include "trpc.h" #include "tvariant.h" #include "tsdb.h" +#include "qUdf.h" #define TSDB_FUNC_INVALID_ID -1 #define TSDB_FUNC_COUNT 0 @@ -201,10 +202,8 @@ typedef struct SAggFunctionInfo { int8_t stableFuncId; // transfer function for super table query uint16_t status; - bool (*init)(SQLFunctionCtx *pCtx); // setup the execute environment - + bool (*init)(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultCellInfo); // setup the execute environment void (*xFunction)(SQLFunctionCtx *pCtx); // blocks version function - void (*xFunctionF)(SQLFunctionCtx *pCtx, int32_t position); // single-row function version, todo merge with blockwise function // finalizer must be called after all xFunction has been executed to generated final result. void (*xFinalize)(SQLFunctionCtx *pCtx); @@ -216,7 +215,7 @@ typedef struct SAggFunctionInfo { #define GET_RES_INFO(ctx) ((ctx)->resultInfo) int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, - int16_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable); + int16_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo); int32_t isValidFunction(const char* name, int32_t len); #define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0) @@ -262,9 +261,9 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, int32_t bufLen) { pResInfo->initialized = true; // the this struct has been initialized flag - pResInfo->complete = false; + pResInfo->complete = false; pResInfo->hasResult = false; - pResInfo->numOfRes = 0; + pResInfo->numOfRes = 0; memset(GET_ROWCELL_INTERBUF(pResInfo), 0, bufLen); } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 955dd734cf58582ecfe1bbf9871525be0ebc161c..ce70a9ba4ab4d2dee8cab97142cac96a9a41e9cf 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -29,6 +29,7 @@ #include "tarray.h" #include "tlockfree.h" #include "tsdb.h" +#include "qUdf.h" struct SColumnFilterElem; typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, const char* val1, const char* val2, int16_t type); @@ -73,14 +74,14 @@ typedef struct SResultRowPool { typedef struct SResultRow { int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer - int32_t offset:29; // row index in buffer page + int32_t offset:29; // row index in buffer page bool startInterp; // the time window start timestamp has done the interpolation already. bool endInterp; // the time window end timestamp has done the interpolation already. 
bool closed; // this result status: closed or opened uint32_t numOfRows; // number of rows of current time window SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo - STimeWindow win; - char* key; // start key of current result row + STimeWindow win; + char *key; // start key of current result row } SResultRow; typedef struct SGroupResInfo { @@ -105,8 +106,7 @@ typedef struct SResultRowInfo { int16_t type:8; // data type for hash key int32_t size:24; // number of result set int32_t capacity; // max capacity - int32_t curIndex; // current start active index - int64_t prevSKey; // previous (not completed) sliding window start key + int32_t curPos; // current active result row index of pResult list } SResultRowInfo; typedef struct SColumnFilterElem { @@ -118,6 +118,7 @@ typedef struct SColumnFilterElem { typedef struct SSingleColumnFilterInfo { void* pData; + void* pData2; //used for nchar column int32_t numOfFilters; SColumnInfo info; SColumnFilterElem* pFilters; @@ -133,6 +134,28 @@ typedef struct STableQueryInfo { SResultRowInfo resInfo; } STableQueryInfo; +typedef enum { + QUERY_PROF_BEFORE_OPERATOR_EXEC = 0, + QUERY_PROF_AFTER_OPERATOR_EXEC, + QUERY_PROF_QUERY_ABORT +} EQueryProfEventType; + +typedef struct { + EQueryProfEventType eventType; + int64_t eventTime; + + union { + uint8_t operatorType; //for operator event + int32_t abortCode; //for query abort event + }; +} SQueryProfEvent; + +typedef struct { + uint8_t operatorType; + int64_t sumSelfTime; + int64_t sumRunTimes; +} SOperatorProfResult; + typedef struct SQueryCostInfo { uint64_t loadStatisTime; uint64_t loadFileBlockTime; @@ -154,6 +177,9 @@ typedef struct SQueryCostInfo { uint64_t tableInfoSize; uint64_t hashSize; uint64_t numOfTimeWindows; + + SArray* queryProfEvents; //SArray + SHashObj* operatorProfResults; //map } SQueryCostInfo; typedef struct { @@ -190,8 +216,9 @@ typedef struct SQueryAttr { bool simpleAgg; bool pointInterpQuery; // point interpolation query bool needReverseScan; // need reverse scan - bool distinctTag; // distinct tag query + bool distinct; // distinct query or not bool stateWindow; // window State on sub/normal table + bool createFilterOperator; // if filter operator is needed int32_t interBufSize; // intermediate buffer sizse int32_t havingNum; // having expr number @@ -231,6 +258,7 @@ typedef struct SQueryAttr { SMemRef memRef; STableGroupInfo tableGroupInfo; // table list SArray int32_t vgId; + SArray *pUdfInfo; // no need to free } SQueryAttr; typedef SSDataBlock* (*__operator_fn_t)(void* param, bool* newgroup); @@ -250,6 +278,7 @@ typedef struct SQueryRuntimeEnv { bool enableGroupData; SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file SHashObj* pResultRowHashTable; // quick locate the window object for each result + SHashObj* pResultRowListSet; // used to check if current ResultRowInfo has ResultRow object or not char* keyBuf; // window key buffer SResultRowPool* pool; // window result object pool char** prevRow; @@ -270,6 +299,7 @@ typedef struct SQueryRuntimeEnv { STableQueryInfo *current; SRspResultInfo resultInfo; SHashObj *pTableRetrieveTsMap; + SUdfInfo *pUdfInfo; } SQueryRuntimeEnv; enum { @@ -285,7 +315,7 @@ enum OPERATOR_TYPE_E { OP_TagScan = 4, OP_TableBlockInfoScan= 5, OP_Aggregate = 6, - OP_Arithmetic = 7, + OP_Project = 7, OP_Groupby = 8, OP_Limit = 9, OP_SLimit = 10, @@ -295,7 +325,7 @@ enum OPERATOR_TYPE_E { OP_MultiTableAggregate = 14, OP_MultiTableTimeInterval = 15, OP_DummyInput = 16, //TODO remove it after 
fully refactor. - OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream. + OP_MultiwayMergeSort = 17, // multi-way data merge into one input stream. OP_GlobalAggregate = 18, // global merge for the multi-way data sources. OP_Filter = 19, OP_Distinct = 20, @@ -365,6 +395,7 @@ typedef struct SQueryParam { SGroupbyExpr *pGroupbyExpr; int32_t tableScanOperator; SArray *pOperator; + SUdfInfo *pUdfInfo; } SQueryParam; typedef struct STableScanInfo { @@ -396,7 +427,7 @@ typedef struct STagScanInfo { SColumnInfo* pCols; SSDataBlock* pRes; int32_t totalTables; - int32_t currentIndex; + int32_t curPos; } STagScanInfo; typedef struct SOptrBasicInfo { @@ -413,13 +444,13 @@ typedef struct SAggOperatorInfo { uint32_t seed; } SAggOperatorInfo; -typedef struct SArithOperatorInfo { +typedef struct SProjectOperatorInfo { SOptrBasicInfo binfo; int32_t bufCapacity; uint32_t seed; SSDataBlock *existDataBlock; -} SArithOperatorInfo; +} SProjectOperatorInfo; typedef struct SLimitOperatorInfo { int64_t limit; @@ -483,6 +514,7 @@ typedef struct SDistinctOperatorInfo { bool recordNullVal; //has already record the null value, no need to try again int64_t threshold; int64_t outputCapacity; + int32_t colIndex; } SDistinctOperatorInfo; struct SGlobalMerger; @@ -504,6 +536,7 @@ typedef struct SMultiwayMergeInfo { bool hasPrev; bool groupMix; + SArray *udfInfo; } SMultiwayMergeInfo; void appendUpstream(SOperatorInfo* p, SOperatorInfo* pUpstream); @@ -513,7 +546,7 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream); SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); @@ -526,8 +559,8 @@ SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows, void* merger, bool groupMix); +SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo); SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param); SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger); SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* 
pExpr, int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter); @@ -545,23 +578,25 @@ void doCompactSDataBlock(SSDataBlock* pBlock, int32_t numOfRows, int8_t* p); SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows); void* destroyOutputBuf(SSDataBlock* pBlock); +void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols); void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order); int32_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput); void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResultRowInfo* pResultRowInfo, int32_t* rowCellInfoOffset); void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOfInputRows); +void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity); void freeParam(SQueryParam *param); int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param); int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExprInfo** pExprInfo, - SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg); + SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg, SUdfInfo* pUdfInfo); int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, - SSqlExpr **pExpr, SExprInfo *prevExpr); + SSqlExpr **pExpr, SExprInfo *prevExpr, SUdfInfo *pUdfInfo); SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code); SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs, - SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t *qId); + SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, char* sql, uint64_t qId, SUdfInfo* pUdfInfo); int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* pQInfo, SQueryParam* param, char* start, int32_t prevResultLen, void* merger); @@ -580,16 +615,25 @@ bool doBuildResCheck(SQInfo* pQInfo); void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status); bool onlyQueryTags(SQueryAttr* pQueryAttr); +void destroyUdfInfo(SUdfInfo* pUdfInfo); + bool isValidQInfo(void *param); int32_t doDumpQueryResult(SQInfo *pQInfo, char *data); size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows); void setQueryKilled(SQInfo *pQInfo); + +void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType); +void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code); +void calculateOperatorProfResults(SQInfo* pQInfo); void queryCostStatis(SQInfo *pQInfo); + void freeQInfo(SQInfo *pQInfo); void freeQueryAttr(SQueryAttr *pQuery); int32_t getMaximumIdleDurationSec(); +void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t type); + #endif // TDENGINE_QEXECUTOR_H diff --git a/src/query/inc/qExtbuffer.h b/src/query/inc/qExtbuffer.h index b851fbb3e076db76a94b8542b83e6f3dc073f3f3..cf0e8ce31af91ad9d94feed2045265862a57545a 100644 --- a/src/query/inc/qExtbuffer.h +++ b/src/query/inc/qExtbuffer.h @@ -77,13 +77,13 @@ typedef struct tFilePagesItem { typedef struct SSchemaEx { struct SSchema field; - int16_t offset; + int32_t offset; } SSchemaEx; typedef struct SColumnModel { int32_t capacity; int32_t numOfCols; - int16_t rowSize; + int32_t rowSize; SSchemaEx *pFields; } SColumnModel; diff --git a/src/query/inc/qScript.h 
b/src/query/inc/qScript.h new file mode 100644 index 0000000000000000000000000000000000000000..574bb51368afeaeddef5fbd5c5bd8469fbe0cdef --- /dev/null +++ b/src/query/inc/qScript.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_QSCRIPT_H +#define TDENGINE_QSCRIPT_H + +#include +#include +#include + +#include "tutil.h" +#include "hash.h" +#include "tlist.h" +#include "qUdf.h" + +#define MAX_FUNC_NAME 64 + +#define USER_FUNC_NAME "funcName" +#define USER_FUNC_NAME_LIMIT 48 + +enum ScriptState { + SCRIPT_STATE_INIT, + SCRIPT_STATE_ADD, + SCRIPT_STATE_MERGE, + SCRIPT_STATE_FINALIZE +}; + + +typedef struct { + SHashObj *funcId; //func already registed in lua_env, may be no use + lua_State *lua_state; // lua env +} ScriptEnv; + +typedef struct ScriptCtx { + char funcName[USER_FUNC_NAME_LIMIT]; + int8_t state; + ScriptEnv *pEnv; + int8_t isAgg; // agg function or not + + // init value of udf script + int8_t resType; + int16_t resBytes; + + int32_t numOfOutput; + int32_t offset; + +} ScriptCtx; + +int taosLoadScriptInit(void *pInit); +void taosLoadScriptNormal(void *pInit, char *pInput, int16_t iType, int16_t iBytes, int32_t numOfRows, + int64_t *ptsList, int64_t key, char* pOutput, char *ptsOutput, int32_t *numOfOutput, int16_t oType, int16_t oBytes); +void taosLoadScriptFinalize(void *pInit, int64_t key, char *pOutput, int32_t *output); +void taosLoadScriptMerge(void *pCtx, char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput); +void taosLoadScriptDestroy(void *pInit); + +typedef struct { + SList *scriptEnvs; // + int32_t mSize; // pool limit + int32_t cSize; // current available size + pthread_mutex_t mutex; +} ScriptEnvPool; + +ScriptCtx* createScriptCtx(char *str, int8_t resType, int16_t resBytes); +void destroyScriptCtx(void *pScriptCtx); + +int32_t scriptEnvPoolInit(); +void scriptEnvPoolCleanup(); +bool isValidScript(char *script, int32_t len); + +#endif //TDENGINE_QSCRIPT_H diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index c6f667475ee7b061fcbc86be7a2aa8fac7282e8e..531ff06565dba837c696c6069d409ccf536cbe8c 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -182,6 +182,15 @@ typedef struct SCreateDbInfo { int16_t partitions; } SCreateDbInfo; +typedef struct SCreateFuncInfo { + SStrToken name; + SStrToken path; + int32_t type; + int32_t bufSize; + TAOS_FIELD output; +} SCreateFuncInfo; + + typedef struct SCreateAcctInfo { int32_t maxUsers; int32_t maxDbs; @@ -214,10 +223,11 @@ typedef struct SMiscInfo { int16_t tableType; SUserInfo user; union { - SCreateDbInfo dbOpt; - SCreateAcctInfo acctOpt; - SShowInfo showOpt; - SStrToken id; + SCreateDbInfo dbOpt; + SCreateAcctInfo acctOpt; + SCreateFuncInfo funcOpt; + SShowInfo showOpt; + SStrToken id; }; } SMiscInfo; @@ -226,6 +236,7 @@ typedef struct SSqlInfo { bool valid; SArray *list; // todo refactor char msg[256]; + SArray *funcs; union { SCreateTableSql *pCreateTableInfo; SAlterTableInfo *pAlterInfo; @@ 
-237,18 +248,20 @@ typedef struct tSqlExpr { uint16_t type; // sql node type uint32_t tokenId; // TK_LE: less than(binary expr) - // the whole string of the function(col, param), while the function name is kept in token - SStrToken operand; - uint32_t functionId; // function id + // the whole string of the function(col, param), while the function name is kept in exprToken + struct { + SStrToken operand; + struct SArray *paramList; // function parameters list + } Expr; - SStrToken colInfo; // table column info + int32_t functionId; // function id, todo remove it + SStrToken columnName; // table column info tVariant value; // the use input value - SStrToken token; // original sql expr string - uint32_t flags; + SStrToken exprToken; // original sql expr string + uint32_t flags; // todo remove it struct tSqlExpr *pLeft; // left child struct tSqlExpr *pRight; // right child - struct SArray *pParam; // function parameters list } tSqlExpr; // used in select clause. select from xxx @@ -269,6 +282,7 @@ SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrT // sql expr leaf node tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType); tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType); +SArray *tStrTokenAppend(SArray *pList, SStrToken *pToken); tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType); tSqlExpr *tSqlExprClone(tSqlExpr *pSrc); diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h index ec389de9e6dde48f3c7b0b200184ad75c4e0ec0f..520dade6683deedc604dc3287e5ff81cacac7850 100644 --- a/src/query/inc/qTableMeta.h +++ b/src/query/inc/qTableMeta.h @@ -106,11 +106,14 @@ typedef struct SQueryInfo { STagCond tagCond; SOrderVal order; - int16_t fillType; // final result fill type int16_t numOfTables; STableMetaInfo **pTableMetaInfo; struct STSBuf *tsBuf; + + int16_t fillType; // final result fill type int64_t * fillVal; // default value for fill + int32_t numOfFillVal; // fill value size + char * msg; // pointer to the pCmd->payload to keep error message temporarily int64_t clauseLimit; // limit for current sub clause @@ -118,13 +121,17 @@ typedef struct SQueryInfo { int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX - int16_t resColumnId; // result column id - bool distinctTag; // distinct tag or not + bool distinct; // distinct tag or not + bool onlyHasTagCond; int32_t round; // 0/1/.... int32_t bufLen; char* buf; - struct SQInfo* pQInfo; // global merge operator - struct SQueryAttr* pQueryAttr; // query object + + bool udfCopy; + SArray *pUdfInfo; + + struct SQInfo *pQInfo; // global merge operator + struct SQueryAttr *pQueryAttr; // query object struct SQueryInfo *sibling; // sibling SArray *pUpstream; // SArray @@ -138,8 +145,8 @@ typedef struct SQueryInfo { bool hasFilter; bool onlyTagQuery; bool orderProjectQuery; -// bool diffQuery; bool stateWindow; + bool globalMerge; } SQueryInfo; /** diff --git a/src/query/inc/qUdf.h b/src/query/inc/qUdf.h new file mode 100644 index 0000000000000000000000000000000000000000..1083b1e698f7591aae4586c7722e5343cd9c4d86 --- /dev/null +++ b/src/query/inc/qUdf.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_QUDF_H +#define TDENGINE_QUDF_H + +enum { TSDB_UDF_FUNC_NORMAL = 0, TSDB_UDF_FUNC_INIT, TSDB_UDF_FUNC_FINALIZE, TSDB_UDF_FUNC_MERGE, TSDB_UDF_FUNC_DESTROY, TSDB_UDF_FUNC_MAX_NUM }; + + + +typedef struct SUdfInit{ + int32_t maybe_null; /* 1 if function can return NULL */ + uint32_t decimals; /* for real functions */ + uint64_t length; /* For string functions */ + char *ptr; /* free pointer for function data */ + int32_t const_item; /* 0 if result is independent of arguments */ + + // script like lua/javascript + void* script_ctx; + void (*destroyCtxFunc)(void *script_ctx); +} SUdfInit; + + +typedef struct SUdfInfo { + int32_t functionId; // system assigned function id + int32_t funcType; // scalar function or aggregate function + int8_t resType; // result type + int16_t resBytes; // result byte + int32_t contLen; // content length + int32_t bufSize; //interbuf size + char *name; // function name + void *handle; // handle loaded in mem + void *funcs[TSDB_UDF_FUNC_MAX_NUM]; // function ptr + + // for script like lua/javascript only + int isScript; + void *pScriptCtx; + + SUdfInit init; + char *content; + char *path; +} SUdfInfo; + +//script + +typedef int32_t (*scriptInitFunc)(void *pCtx); +typedef void (*scriptNormalFunc)(void *pCtx, char* data, int16_t iType, int16_t iBytes, int32_t numOfRows, + int64_t* ptList, int64_t key, char* dataOutput, char* tsOutput, int32_t* numOfOutput, int16_t oType, int16_t oBytes); +typedef void (*scriptFinalizeFunc)(void *pCtx, int64_t key, char* dataOutput, int32_t* numOfOutput); +typedef void (*scriptMergeFunc)(void *pCtx, char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput); +typedef void (*scriptDestroyFunc)(void* pCtx); + +// dynamic lib +typedef void (*udfNormalFunc)(char* data, int16_t itype, int16_t iBytes, int32_t numOfRows, int64_t* ts, char* dataOutput, char* interBuf, + char* tsOutput, int32_t* numOfOutput, int16_t oType, int16_t oBytes, SUdfInit* buf); +typedef int32_t (*udfInitFunc)(SUdfInit* data); +typedef void (*udfFinalizeFunc)(char* dataOutput, char *interBuf, int32_t* numOfOutput, SUdfInit* buf); +typedef void (*udfMergeFunc)(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf); +typedef void (*udfDestroyFunc)(SUdfInit* buf); + + +#endif // TDENGINE_QUDF_H diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 0756e4178598f09ecdc784b07e226b9054edcfef..d2802d9fe00be74f750abd39b81cc3585bcef773 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -24,7 +24,18 @@ memcpy((_k) + sizeof(uint64_t), (_ori), (_len)); \ } while (0) +#define SET_RES_EXT_WINDOW_KEY(_k, _ori, _len, _uid, _buf) \ + do { \ + assert(sizeof(_uid) == sizeof(uint64_t)); \ + *(void **)(_k) = (_buf); \ + *(uint64_t *)((_k) + POINTER_BYTES) = (_uid); \ + memcpy((_k) + POINTER_BYTES + sizeof(uint64_t), (_ori), (_len)); \ + } while (0) + + #define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t)) +#define GET_RES_EXT_WINDOW_KEY_LEN(_l) ((_l) + 
sizeof(uint64_t) + POINTER_BYTES) + #define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId) #define curTimeWindowIndex(_winres) ((_winres)->curIndex) @@ -55,7 +66,8 @@ static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int return pResultRowInfo->pResult[slot]; } -static FORCE_INLINE char *getPosInResultPage(SQueryAttr *pQueryAttr, tFilePage* page, int32_t rowOffset, int16_t offset) { +static FORCE_INLINE char* getPosInResultPage(SQueryAttr* pQueryAttr, tFilePage* page, int32_t rowOffset, + int32_t offset) { assert(rowOffset >= 0 && pQueryAttr != NULL); int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery); @@ -93,4 +105,6 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo); int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t* offset); +int32_t initUdfInfo(SUdfInfo* pUdfInfo); + #endif // TDENGINE_QUERYUTIL_H diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 2a37b1fc2d81ca2f63cea18c521ed60a310881c9..ecdde4f7076b10a98fc36d75ebe4bac3691379bb 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -65,6 +65,7 @@ program ::= cmd. {} //////////////////////////////////THE SHOW STATEMENT/////////////////////////////////////////// cmd ::= SHOW DATABASES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_DB, 0, 0);} cmd ::= SHOW TOPICS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_TP, 0, 0);} +cmd ::= SHOW FUNCTIONS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_FUNCTION, 0, 0);} cmd ::= SHOW MNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} cmd ::= SHOW DNODES. { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} cmd ::= SHOW ACCOUNTS. { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} @@ -147,6 +148,7 @@ cmd ::= DROP STABLE ifexists(Y) ids(X) cpxName(Z). { cmd ::= DROP DATABASE ifexists(Y) ids(X). { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y, TSDB_DB_TYPE_DEFAULT, -1); } cmd ::= DROP TOPIC ifexists(Y) ids(X). { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &X, &Y, TSDB_DB_TYPE_TOPIC, -1); } +cmd ::= DROP FUNCTION ids(X). { setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &X); } cmd ::= DROP DNODE ids(X). { setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &X); } cmd ::= DROP USER ids(X). { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &X); } @@ -176,7 +178,7 @@ cmd ::= ALTER ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSql(p ////////////////////////////// COMPACT STATEMENT ////////////////////////////////////////////// -cmd ::= COMPACT VNODES IN LP exprlist(Y) RP. { setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, Y);} +cmd ::= COMPACT VNODES IN LP exprlist(Y) RP. { setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, Y);} // An IDENTIFIER can be a generic identifier, or one of several keywords. // Any non-standard keyword can also be an identifier. @@ -200,8 +202,13 @@ cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). { setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &X, &Y, &Z);} cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);} cmd ::= CREATE TOPIC ifnotexists(Z) ids(X) topic_optr(Y). { setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);} +cmd ::= CREATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize(B). { setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &X, &Y, &Z, &B, 1);} +cmd ::= CREATE AGGREGATE FUNCTION ids(X) AS ids(Y) OUTPUTTYPE typename(Z) bufsize(B). 
{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &X, &Y, &Z, &B, 2);} cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSql(pInfo, &X, &Y);} +bufsize(Y) ::= . { Y.n = 0; } +bufsize(Y) ::= BUFSIZE INTEGER(X). { Y = X; } + pps(Y) ::= . { Y.n = 0; } pps(Y) ::= PPS INTEGER(X). { Y = X; } @@ -303,11 +310,13 @@ alter_db_optr(Y) ::= alter_db_optr(Z) quorum(X). { Y = Z; Y.quorum = strtol alter_db_optr(Y) ::= alter_db_optr(Z) keep(X). { Y = Z; Y.keep = X; } alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); } -alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } -alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) cachelast(X). { Y = Z; Y.cachelast = strtol(X.z, NULL, 10); } +// dynamically update the following two parameters are not allowed. +//alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); } +//alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } not support yet + %type alter_topic_optr {SCreateDbInfo} alter_topic_optr(Y) ::= alter_db_optr(Z). { Y = Z; Y.dbType = TSDB_DB_TYPE_TOPIC; } @@ -437,6 +446,7 @@ tagitem(A) ::= FLOAT(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } tagitem(A) ::= STRING(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } tagitem(A) ::= BOOL(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } tagitem(A) ::= NULL(X). { X.type = 0; tVariantCreate(&A, &X); } +tagitem(A) ::= NOW(X). { X.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&A, &X);} tagitem(A) ::= MINUS(X) INTEGER(Y).{ X.n += Y.n; @@ -469,7 +479,7 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). { //////////////////////// The SELECT statement ///////////////////////////////// %type select {SSqlNode*} %destructor select {destroySqlNode($$);} -select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) orderby_opt(Z) having_opt(N) slimit_opt(G) limit_opt(L). { +select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_opt(K) session_option(H) windowstate_option(D) fill_opt(F) sliding_opt(S) groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). { A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N); } @@ -680,7 +690,7 @@ where_opt(A) ::= WHERE expr(X). {A = X;} %type expr {tSqlExpr*} %destructor expr {tSqlExprDestroy($$);} -expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->token.z = X.z; A->token.n = (Z.z - X.z + 1);} +expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->exprToken.z = X.z; A->exprToken.n = (Z.z - X.z + 1);} expr(A) ::= ID(X). { A = tSqlExprCreateIdValue(&X, TK_ID);} expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(&X, TK_ID);} @@ -701,10 +711,10 @@ expr(A) ::= BOOL(X). { A = tSqlExprCreateIdValue(&X, TK_BOOL);} expr(A) ::= NULL(X). { A = tSqlExprCreateIdValue(&X, TK_NULL);} // ordinary functions: min(x), max(x), top(k, 20) -expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSqlExprCreateFunction(Y, &X, &E, X.type); } +expr(A) ::= ID(X) LP exprlist(Y) RP(E). { tStrTokenAppend(pInfo->funcs, &X); A = tSqlExprCreateFunction(Y, &X, &E, X.type); } // for parsing sql functions with wildcard for parameters. 
e.g., count(*)/first(*)/last(*) operation -expr(A) ::= ID(X) LP STAR RP(Y). { A = tSqlExprCreateFunction(NULL, &X, &Y, X.type); } +expr(A) ::= ID(X) LP STAR RP(Y). { tStrTokenAppend(pInfo->funcs, &X); A = tSqlExprCreateFunction(NULL, &X, &Y, X.type); } // is (not) null expression expr(A) ::= expr(X) IS NULL. {A = tSqlExprCreate(X, NULL, TK_ISNULL);} diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 68de90255643ee6ceae6b560cd53226827c29017..d96b260b13cf8a1bbcbc9b329a51bc1a714aba8d 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -27,6 +27,7 @@ #include "qPercentile.h" #include "qTsbuf.h" #include "queryLog.h" +#include "qUdf.h" #define GET_INPUT_DATA_LIST(x) ((char *)((x)->pInput)) #define GET_INPUT_DATA(x, y) (GET_INPUT_DATA_LIST(x) + (y) * (x)->inputBytes) @@ -58,7 +59,7 @@ for (int32_t _i = 0; _i < (ctx)->tagInfo.numOfTagCols; ++_i) { \ SQLFunctionCtx *__ctx = (ctx)->tagInfo.pTagCtxList[_i]; \ if (__ctx->functionId == TSDB_FUNC_TS_DUMMY) { \ - __ctx->tag.i64 = (ts); \ + __ctx->tag.i64 = (ts); \ __ctx->tag.nType = TSDB_DATA_TYPE_BIGINT; \ } \ aAggs[TSDB_FUNC_TAG].xFunction(__ctx); \ @@ -74,7 +75,6 @@ } while (0); void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} -void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {} void doFinalizer(SQLFunctionCtx *pCtx) { RESET_RESULT_INFO(GET_RES_INFO(pCtx)); } @@ -170,12 +170,13 @@ typedef struct SDerivInfo { } SDerivInfo; int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type, - int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable) { + int16_t *bytes, int32_t *interBytes, int16_t extLength, bool isSuperTable, SUdfInfo* pUdfInfo) { if (!isValidDataType(dataType)) { qError("Illegal data type %d or data type length %d", dataType, dataBytes); return TSDB_CODE_TSC_INVALID_OPERATION; } - + + if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_INTERP) { @@ -235,6 +236,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI } if (isSuperTable) { + if (functionId < 0) { + if (pUdfInfo->bufSize > 0) { + *type = TSDB_DATA_TYPE_BINARY; + *bytes = pUdfInfo->bufSize; + *interBytes = *bytes; + } else { + *type = pUdfInfo->resType; + *bytes = pUdfInfo->resBytes; + *interBytes = *bytes; + } + + return TSDB_CODE_SUCCESS; + } + if (functionId == TSDB_FUNC_MIN || functionId == TSDB_FUNC_MAX) { *type = TSDB_DATA_TYPE_BINARY; *bytes = (int16_t)(dataBytes + DATA_SET_FLAG_SIZE); @@ -289,7 +304,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI return TSDB_CODE_SUCCESS; } } - + if (functionId == TSDB_FUNC_SUM) { if (IS_SIGNED_NUMERIC_TYPE(dataType)) { *type = TSDB_DATA_TYPE_BIGINT; @@ -314,7 +329,20 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI *interBytes = sizeof(STwaInfo); return TSDB_CODE_SUCCESS; } - + + if (functionId < 0) { + *type = pUdfInfo->resType; + *bytes = pUdfInfo->resBytes; + + if (pUdfInfo->bufSize > 0) { + *interBytes = pUdfInfo->bufSize; + } else { + *interBytes = *bytes; + } + + return TSDB_CODE_SUCCESS; + } + if (functionId == TSDB_FUNC_AVG) { *type = TSDB_DATA_TYPE_DOUBLE; *bytes = sizeof(double); @@ -391,14 +419,13 @@ int32_t isValidFunction(const char* name, int32_t len) { return -1; } -static bool 
function_setup(SQLFunctionCtx *pCtx) { - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->initialized) { +static bool function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (pResultInfo->initialized) { return false; } memset(pCtx->pOutput, 0, (size_t)pCtx->outputBytes); - initResultInfo(pResInfo, pCtx->interBufBytes); + initResultInfo(pResultInfo, pCtx->interBufBytes); return true; } @@ -456,20 +483,6 @@ static void count_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, numOfElem, 1); } -static void count_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - *((int64_t *)pCtx->pOutput) += pCtx->size; - - // do not need it actually - SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->hasResult = DATA_SET_FLAG; -} - static void count_func_merge(SQLFunctionCtx *pCtx) { int64_t *pData = (int64_t *)GET_INPUT_DATA_LIST(pCtx); for (int32_t i = 0; i < pCtx->size; ++i) { @@ -535,7 +548,7 @@ int32_t noDataRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { if ((ctx)->hasNull && isNull((char *)&(list)[i], tsdbType)) { \ continue; \ } \ - TSKEY key = GET_TS_DATA(ctx, i); \ + TSKEY key = (ctx)->ptsList != NULL? GET_TS_DATA(ctx, i):0; \ UPDATE_DATA(ctx, val, (list)[i], num, sign, key); \ } @@ -609,46 +622,6 @@ static void do_sum(SQLFunctionCtx *pCtx) { } } -static void do_sum_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - int64_t *res = (int64_t*) pCtx->pOutput; - - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - *res += GET_INT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - *res += GET_INT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - *res += GET_INT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - *res += GET_INT64_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { - uint64_t *r = (uint64_t *)pCtx->pOutput; - *r += GET_UINT64_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *retVal = (double*) pCtx->pOutput; - *retVal += GET_DOUBLE_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - double *retVal = (double*) pCtx->pOutput; - *retVal += GET_FLOAT_VAL(pData); - } - - GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; -} - static void sum_function(SQLFunctionCtx *pCtx) { do_sum(pCtx); @@ -661,17 +634,6 @@ static void sum_function(SQLFunctionCtx *pCtx) { } } -static void sum_function_f(SQLFunctionCtx *pCtx, int32_t index) { - do_sum_f(pCtx, index); - - // keep the result data in output buffer, not in the intermediate buffer - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - SSumInfo *pSum = (SSumInfo *)pCtx->pOutput; - pSum->hasResult = DATA_SET_FLAG; - } -} - static void sum_func_merge(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; @@ -847,53 +809,6 @@ static void 
avg_function(SQLFunctionCtx *pCtx) { } } -static void avg_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - - // NOTE: keep the intermediate result into the interResultBuf - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - - SAvgInfo *pAvgInfo = (SAvgInfo *)GET_ROWCELL_INTERBUF(pResInfo); - - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - pAvgInfo->sum += GET_INT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - pAvgInfo->sum += GET_INT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - pAvgInfo->sum += GET_INT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - pAvgInfo->sum += GET_INT64_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - pAvgInfo->sum += GET_DOUBLE_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - pAvgInfo->sum += GET_FLOAT_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UTINYINT) { - pAvgInfo->sum += GET_UINT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_USMALLINT) { - pAvgInfo->sum += GET_UINT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UINT) { - pAvgInfo->sum += GET_UINT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_UBIGINT) { - pAvgInfo->sum += GET_UINT64_VAL(pData); - } - - // restore sum and count of elements - pAvgInfo->num += 1; - - // set has result flag - pResInfo->hasResult = DATA_SET_FLAG; - - // keep the data into the final output buffer for super table query since this execution may be the last one - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SAvgInfo)); - } -} - static void avg_func_merge(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); @@ -1098,8 +1013,8 @@ static void minMax_function(SQLFunctionCtx *pCtx, char *pOutput, int32_t isMin, } } -static bool min_func_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool min_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { return false; // not initialized since it has been initialized } @@ -1143,8 +1058,8 @@ static bool min_func_setup(SQLFunctionCtx *pCtx) { return true; } -static bool max_func_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool max_func_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { return false; // not initialized since it has been initialized } @@ -1307,78 +1222,6 @@ static void max_func_merge(SQLFunctionCtx *pCtx) { } } -static void minMax_function_f(SQLFunctionCtx *pCtx, int32_t index, int32_t isMin) { - char *pData = GET_INPUT_DATA(pCtx, index); - TSKEY key = GET_TS_DATA(pCtx, index); - - int32_t num = 0; - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - int8_t *output = (int8_t *)pCtx->pOutput; - int8_t i = GET_INT8_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - int16_t *output = (int16_t*) pCtx->pOutput; - int16_t i = GET_INT16_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - int32_t *output = (int32_t*) pCtx->pOutput; - int32_t i = GET_INT32_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT) { - int64_t *output = (int64_t*) 
pCtx->pOutput; - int64_t i = GET_INT64_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - float *output = (float*) pCtx->pOutput; - float i = GET_FLOAT_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - double *output = (double*) pCtx->pOutput; - double i = GET_DOUBLE_VAL(pData); - - UPDATE_DATA(pCtx, *output, i, num, isMin, key); - } - - GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; -} - -static void max_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - minMax_function_f(pCtx, index, 0); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - char *flag = pCtx->pOutput + pCtx->inputBytes; - *flag = DATA_SET_FLAG; - } -} - -static void min_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - minMax_function_f(pCtx, index, 1); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pResInfo->hasResult == DATA_SET_FLAG && pCtx->stableQuery) { - char *flag = pCtx->pOutput + pCtx->inputBytes; - *flag = DATA_SET_FLAG; - } -} - #define LOOP_STDDEV_IMPL(type, r, d, ctx, delta, _type, num) \ for (int32_t i = 0; i < (ctx)->size; ++i) { \ if ((ctx)->hasNull && isNull((char *)&((type *)d)[i], (_type))) { \ @@ -1472,114 +1315,6 @@ static void stddev_function(SQLFunctionCtx *pCtx) { } } -static void stddev_function_f(SQLFunctionCtx *pCtx, int32_t index) { - // the second stage to calculate standard deviation - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo); - - if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) { - pStd->stage++; - avg_finalizer(pCtx); - - pResInfo->initialized = true; // set it initialized to avoid re-initialization - - // save average value into tmpBuf, for second stage scan - SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo); - - pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput); - assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum)); - } - - /* the first stage is to calculate average value */ - if (pStd->stage == 0) { - avg_function_f(pCtx, index); - } else if (pStd->num > 0) { - double avg = pStd->avg; - void * pData = GET_INPUT_DATA(pCtx, index); - - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - pStd->res += POW2(GET_INT32_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - pStd->res += POW2(GET_FLOAT_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - pStd->res += POW2(GET_DOUBLE_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - pStd->res += POW2(GET_INT64_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - pStd->res += POW2(GET_INT16_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - pStd->res += POW2(GET_INT8_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_UINT: { - pStd->res += POW2(GET_UINT32_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - pStd->res += POW2(GET_UINT64_VAL(pData) - avg); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - pStd->res += POW2(GET_UINT16_VAL(pData) - avg); - break; - } - case 
TSDB_DATA_TYPE_UTINYINT: { - pStd->res += POW2(GET_UINT8_VAL(pData) - avg); - break; - } - default: - qError("stddev function not support data type:%d", pCtx->inputType); - } - - SET_VAL(pCtx, 1, 1); - } -} - -static UNUSED_FUNC void stddev_next_step(SQLFunctionCtx *pCtx) { - /* - * the stddevInfo and the average info struct share the same buffer area - * And the position of each element in their struct is exactly the same matched - */ - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevInfo *pStd = GET_ROWCELL_INTERBUF(pResInfo); - - if (pStd->stage == 0) { - /* - * stddev is calculated in two stage: - * 1. get the average value of all data; - * 2. get final result, based on the average values; - * so, if this routine is in second stage, no further step is required - */ - pStd->stage++; - avg_finalizer(pCtx); - - pResInfo->initialized = true; // set it initialized to avoid re-initialization - - // save average value into tmpBuf, for second stage scan - SAvgInfo *pAvg = GET_ROWCELL_INTERBUF(pResInfo); - - pStd->avg = GET_DOUBLE_VAL(pCtx->pOutput); - assert((isnan(pAvg->sum) && pAvg->num == 0) || (pStd->num == pAvg->num && pStd->avg == pAvg->sum)); - } else { - pResInfo->complete = true; - } -} - static void stddev_finalizer(SQLFunctionCtx *pCtx) { SStddevInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); @@ -1696,132 +1431,41 @@ static void stddev_dst_function(SQLFunctionCtx *pCtx) { memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo)); } -static void stddev_dst_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; +static void stddev_dst_merge(SQLFunctionCtx *pCtx) { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo); + + char *input = GET_INPUT_DATA_LIST(pCtx); + + for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { + SStddevdstInfo *pInput = (SStddevdstInfo *)input; + if (pInput->num == 0) { // current input is null + continue; + } + + pRes->num += pInput->num; + pRes->res += pInput->res; } +} - // the second stage to calculate standard deviation +static void stddev_dst_finalizer(SQLFunctionCtx *pCtx) { SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - double *retVal = &pStd->res; - // all data are null, no need to proceed - SArray* resList = (SArray*) pCtx->param[0].pz; - if (resList == NULL) { - return; + if (pStd->num <= 0) { + setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); + } else { + double *retValue = (double *)pCtx->pOutput; + *retValue = sqrt(pStd->res / pStd->num); + SET_VAL(pCtx, 1, 1); } - // find the correct group average results according to the tag value - int32_t len = (int32_t) taosArrayGetSize(resList); - assert(len > 0); - - double avg = 0; - if (len == 1) { - SResPair* p = taosArrayGet(resList, 0); - avg = p->avg; - } else { // todo opt performance by using iterator since the timestamp lsit is matched with the output result - SResPair* p = bsearch(&pCtx->startTs, resList->pData, len, sizeof(SResPair), tsCompare); - assert(p != NULL); + doFinalizer(pCtx); +} - avg = p->avg; - } - - int32_t num = 0; - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - for (int32_t i = 0; i < pCtx->size; ++i) { - if (pCtx->hasNull && isNull((const char*) (&((int32_t *)pData)[i]), pCtx->inputType)) { - continue; - } - num += 1; - *retVal += POW2(((int32_t *)pData)[i] - avg); - } - break; - } - case TSDB_DATA_TYPE_FLOAT: { - 
LOOP_STDDEV_IMPL(float, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - LOOP_STDDEV_IMPL(double, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_UTINYINT: { - LOOP_STDDEV_IMPL(int8_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - LOOP_STDDEV_IMPL(int16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_USMALLINT: { - LOOP_STDDEV_IMPL(uint16_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_UINT: { - LOOP_STDDEV_IMPL(uint32_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - LOOP_STDDEV_IMPL(int64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - case TSDB_DATA_TYPE_UBIGINT: { - LOOP_STDDEV_IMPL(uint64_t, *retVal, pData, pCtx, avg, pCtx->inputType, num); - break; - } - default: - qError("stddev function not support data type:%d", pCtx->inputType); - } - - pStd->num += num; - SET_VAL(pCtx, num, 1); - - // copy to the final output buffer for super table - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)), sizeof(SAvgInfo)); -} - - -static void stddev_dst_merge(SQLFunctionCtx *pCtx) { - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SStddevdstInfo* pRes = GET_ROWCELL_INTERBUF(pResInfo); - - char *input = GET_INPUT_DATA_LIST(pCtx); - - for (int32_t i = 0; i < pCtx->size; ++i, input += pCtx->inputBytes) { - SStddevdstInfo *pInput = (SStddevdstInfo *)input; - if (pInput->num == 0) { // current input is null - continue; - } - - pRes->num += pInput->num; - pRes->res += pInput->res; - } -} - -static void stddev_dst_finalizer(SQLFunctionCtx *pCtx) { - SStddevdstInfo *pStd = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); - - if (pStd->num <= 0) { - setNull(pCtx->pOutput, pCtx->outputType, pCtx->outputBytes); - } else { - double *retValue = (double *)pCtx->pOutput; - *retValue = sqrt(pStd->res / pStd->num); - SET_VAL(pCtx, 1, 1); - } - - doFinalizer(pCtx); -} - -////////////////////////////////////////////////////////////////////////////////////// -static bool first_last_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { - return false; +////////////////////////////////////////////////////////////////////////////////////// +static bool first_last_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { + return false; } // used to keep the timestamp for comparison @@ -1833,7 +1477,7 @@ static bool first_last_function_setup(SQLFunctionCtx *pCtx) { // todo opt for null block static void first_function(SQLFunctionCtx *pCtx) { - if (pCtx->order == TSDB_ORDER_DESC /*|| pCtx->preAggVals.dataBlockLoaded == false*/) { + if (pCtx->order == TSDB_ORDER_DESC) { return; } @@ -1847,10 +1491,11 @@ static void first_function(SQLFunctionCtx *pCtx) { } memcpy(pCtx->pOutput, data, pCtx->inputBytes); - - TSKEY k = GET_TS_DATA(pCtx, i); - DO_UPDATE_TAG_COLUMNS(pCtx, k); - + if (pCtx->ptsList != NULL) { + TSKEY k = GET_TS_DATA(pCtx, i); + DO_UPDATE_TAG_COLUMNS(pCtx, k); + } + SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); pInfo->hasResult = DATA_SET_FLAG; pInfo->complete = true; @@ -1862,27 +1507,6 @@ static void first_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void first_function_f(SQLFunctionCtx *pCtx, int32_t 
index) { - if (pCtx->order == TSDB_ORDER_DESC) { - return; - } - - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); - - TSKEY ts = GET_TS_DATA(pCtx, index); - DO_UPDATE_TAG_COLUMNS(pCtx, ts); - - SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->hasResult = DATA_SET_FLAG; - pInfo->complete = true; // get the first not-null data, completed -} - static void first_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { int64_t *timestamp = GET_TS_LIST(pCtx); @@ -1932,21 +1556,6 @@ static void first_dist_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void first_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - if (pCtx->order == TSDB_ORDER_DESC) { - return; - } - - first_data_assign_impl(pCtx, pData, index); - - SET_VAL(pCtx, 1, 1); -} - static void first_dist_func_merge(SQLFunctionCtx *pCtx) { assert(pCtx->stableQuery); @@ -1978,70 +1587,55 @@ static void first_dist_func_merge(SQLFunctionCtx *pCtx) { * least one data in this block that is not null.(TODO opt for this case) */ static void last_function(SQLFunctionCtx *pCtx) { - if (pCtx->order != pCtx->param[0].i64/* || pCtx->preAggVals.dataBlockLoaded == false*/) { + if (pCtx->order != pCtx->param[0].i64) { return; } - + + SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx); + int32_t notNullElems = 0; - - for (int32_t i = pCtx->size - 1; i >= 0; --i) { - char *data = GET_INPUT_DATA(pCtx, i); - if (pCtx->hasNull && isNull(data, pCtx->inputType)) { - if (!pCtx->requireNull) { - continue; - } - } + if (pCtx->order == TSDB_ORDER_DESC) { - memcpy(pCtx->pOutput, data, pCtx->inputBytes); - - TSKEY ts = GET_TS_DATA(pCtx, i); - DO_UPDATE_TAG_COLUMNS(pCtx, ts); - - SResultRowCellInfo *pInfo = GET_RES_INFO(pCtx); - pInfo->hasResult = DATA_SET_FLAG; - - pInfo->complete = true; // set query completed on this column - notNullElems++; - break; - } - - SET_VAL(pCtx, notNullElems, 1); -} + for (int32_t i = pCtx->size - 1; i >= 0; --i) { + char *data = GET_INPUT_DATA(pCtx, i); + if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) { + continue; + } -static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } + memcpy(pCtx->pOutput, data, pCtx->inputBytes); - // the scan order is not the required order, ignore it - if (pCtx->order != pCtx->param[0].i64) { - return; - } + TSKEY ts = pCtx->ptsList ? GET_TS_DATA(pCtx, i) : 0; + DO_UPDATE_TAG_COLUMNS(pCtx, ts); - if (pCtx->order == TSDB_ORDER_DESC) { - SET_VAL(pCtx, 1, 1); - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); + pResInfo->hasResult = DATA_SET_FLAG; + pResInfo->complete = true; // set query completed on this column + notNullElems++; + break; + } + } else { // ascending order + for (int32_t i = pCtx->size - 1; i >= 0; --i) { + char *data = GET_INPUT_DATA(pCtx, i); + if (pCtx->hasNull && isNull(data, pCtx->inputType) && (!pCtx->requireNull)) { + continue; + } - TSKEY ts = GET_TS_DATA(pCtx, index); - DO_UPDATE_TAG_COLUMNS(pCtx, ts); + TSKEY ts = pCtx->ptsList ? 
GET_TS_DATA(pCtx, i) : 0; - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->hasResult = DATA_SET_FLAG; - pResInfo->complete = true; // set query completed - } else { // in case of ascending order check, all data needs to be checked - SResultRowCellInfo* pResInfo = GET_RES_INFO(pCtx); - TSKEY ts = GET_TS_DATA(pCtx, index); + char* buf = GET_ROWCELL_INTERBUF(pResInfo); + if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) { + pResInfo->hasResult = DATA_SET_FLAG; + memcpy(pCtx->pOutput, data, pCtx->inputBytes); - char* buf = GET_ROWCELL_INTERBUF(pResInfo); - if (pResInfo->hasResult != DATA_SET_FLAG || (*(TSKEY*)buf) < ts) { - pResInfo->hasResult = DATA_SET_FLAG; - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); + *(TSKEY*)buf = ts; + DO_UPDATE_TAG_COLUMNS(pCtx, ts); + } - *(TSKEY*)buf = ts; - DO_UPDATE_TAG_COLUMNS(pCtx, ts); + notNullElems++; + break; } } + + SET_VAL(pCtx, notNullElems, 1); } static void last_data_assign_impl(SQLFunctionCtx *pCtx, char *pData, int32_t index) { @@ -2092,29 +1686,6 @@ static void last_dist_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, notNullElems, 1); } -static void last_dist_function_f(SQLFunctionCtx *pCtx, int32_t index) { - if (pCtx->size == 0) { - return; - } - - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - /* - * 1. for scan data in asc order, no need to check data - * 2. for data blocks that are not loaded, no need to check data - */ - if (pCtx->order != pCtx->param[0].i64) { - return; - } - - last_data_assign_impl(pCtx, pData, index); - - SET_VAL(pCtx, 1, 1); -} - /* * in the secondary merge(local reduce), the output is limited by the * final output size, so the main difference between last_dist_func_merge and second_merge @@ -2216,6 +1787,49 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6 memcpy((dst)->pTags, (src)->pTags, (size_t)(__l)); \ } while (0) +static int32_t topBotComparFn(const void *p1, const void *p2, const void *param) +{ + uint16_t type = *(uint16_t *) param; + tValuePair *val1 = *(tValuePair **) p1; + tValuePair *val2 = *(tValuePair **) p2; + + if (IS_SIGNED_NUMERIC_TYPE(type)) { + if (val1->v.i64 == val2->v.i64) { + return 0; + } + + return (val1->v.i64 > val2->v.i64) ? 1 : -1; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + if (val1->v.u64 == val2->v.u64) { + return 0; + } + + return (val1->v.u64 > val2->v.u64) ? 1 : -1; + } + + if (val1->v.dKey == val2->v.dKey) { + return 0; + } + + return (val1->v.dKey > val2->v.dKey) ? 
1 : -1; +} + +static void topBotSwapFn(void *dst, void *src, const void *param) +{ + char tag[32768]; + tValuePair temp; + uint16_t tagLen = *(uint16_t *) param; + tValuePair *vdst = *(tValuePair **) dst; + tValuePair *vsrc = *(tValuePair **) src; + + memset(tag, 0, sizeof(tag)); + temp.pTags = tag; + + VALUEPAIRASSIGN(&temp, vdst, tagLen); + VALUEPAIRASSIGN(vdst, vsrc, tagLen); + VALUEPAIRASSIGN(vsrc, &temp, tagLen); +} + static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type, SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) { tVariant val = {0}; @@ -2223,61 +1837,19 @@ static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, tValuePair **pList = pInfo->res; assert(pList != NULL); - + if (pInfo->num < maxLen) { - if (pInfo->num == 0 || - (IS_SIGNED_NUMERIC_TYPE(type) && val.i64 >= pList[pInfo->num - 1]->v.i64) || - (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 >= pList[pInfo->num - 1]->v.u64) || - (IS_FLOAT_TYPE(type) && val.dKey >= pList[pInfo->num - 1]->v.dKey)) { - valuePairAssign(pList[pInfo->num], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage); - } else { - int32_t i = pInfo->num - 1; - if (IS_SIGNED_NUMERIC_TYPE(type)) { - while (i >= 0 && pList[i]->v.i64 > val.i64) { - VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); - i -= 1; - } - } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { - while (i >= 0 && pList[i]->v.u64 > val.u64) { - VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); - i -= 1; - } - } else { - while (i >= 0 && pList[i]->v.dKey > val.dKey) { - VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); - i -= 1; - } - } - - valuePairAssign(pList[i + 1], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage); - } - + valuePairAssign(pList[pInfo->num], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage); + + taosheapsort((void *) pList, sizeof(tValuePair **), pInfo->num + 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 0); + pInfo->num++; } else { - int32_t i = 0; - if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 > pList[0]->v.i64) || (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 > pList[0]->v.u64) || (IS_FLOAT_TYPE(type) && val.dKey > pList[0]->v.dKey)) { - // find the appropriate the slot position - if (IS_SIGNED_NUMERIC_TYPE(type)) { - while (i + 1 < maxLen && pList[i + 1]->v.i64 < val.i64) { - VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); - i += 1; - } - } if (IS_UNSIGNED_NUMERIC_TYPE(type)) { - while (i + 1 < maxLen && pList[i + 1]->v.u64 < val.u64) { - VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); - i += 1; - } - } else { - while (i + 1 < maxLen && pList[i + 1]->v.dKey < val.dKey) { - VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); - i += 1; - } - } - - valuePairAssign(pList[i], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[0], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage); + taosheapadjust((void *) pList, sizeof(tValuePair **), 0, maxLen - 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 0); } } } @@ -2291,57 +1863,17 @@ static void do_bottom_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pDa assert(pList != NULL); if (pInfo->num < maxLen) { - if (pInfo->num == 0) { - valuePairAssign(pList[pInfo->num], type, (const char*) &val.i64, ts, pTags, pTagInfo, stage); - } else { - int32_t i = pInfo->num - 1; - - if (IS_SIGNED_NUMERIC_TYPE(type)) { - while (i >= 0 && pList[i]->v.i64 < val.i64) 
{ - VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); - i -= 1; - } - } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { - while (i >= 0 && pList[i]->v.u64 < val.u64) { - VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); - i -= 1; - } - } else { - while (i >= 0 && pList[i]->v.dKey < val.dKey) { - VALUEPAIRASSIGN(pList[i + 1], pList[i], pTagInfo->tagsLen); - i -= 1; - } - } - - valuePairAssign(pList[i + 1], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage); - } - + valuePairAssign(pList[pInfo->num], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage); + + taosheapsort((void *) pList, sizeof(tValuePair **), pInfo->num + 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 1); + pInfo->num++; } else { - int32_t i = 0; - if ((IS_SIGNED_NUMERIC_TYPE(type) && val.i64 < pList[0]->v.i64) || (IS_UNSIGNED_NUMERIC_TYPE(type) && val.u64 < pList[0]->v.u64) || (IS_FLOAT_TYPE(type) && val.dKey < pList[0]->v.dKey)) { - // find the appropriate the slot position - if (IS_SIGNED_NUMERIC_TYPE(type)) { - while (i + 1 < maxLen && pList[i + 1]->v.i64 > val.i64) { - VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); - i += 1; - } - } if (IS_UNSIGNED_NUMERIC_TYPE(type)) { - while (i + 1 < maxLen && pList[i + 1]->v.u64 > val.u64) { - VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); - i += 1; - } - } else { - while (i + 1 < maxLen && pList[i + 1]->v.dKey > val.dKey) { - VALUEPAIRASSIGN(pList[i], pList[i + 1], pTagInfo->tagsLen); - i += 1; - } - } - - valuePairAssign(pList[i], type, (const char*)&val.i64, ts, pTags, pTagInfo, stage); + valuePairAssign(pList[0], type, (const char *)&val.i64, ts, pTags, pTagInfo, stage); + taosheapadjust((void *) pList, sizeof(tValuePair **), 0, maxLen - 1, (const void *) &type, topBotComparFn, (const void *) &pTagInfo->tagsLen, topBotSwapFn, 1); } } } @@ -2503,7 +2035,7 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { char *tmp = (char *)pTopBotInfo + sizeof(STopBotInfo); pTopBotInfo->res = (tValuePair**) tmp; tmp += POINTER_BYTES * pCtx->param[0].i64; - + size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; for (int32_t i = 0; i < pCtx->param[0].i64; ++i) { @@ -2569,14 +2101,13 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const cha } } -static bool top_bottom_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool top_bottom_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; } STopBotInfo *pInfo = getTopBotOutputInfo(pCtx); buildTopBotStruct(pInfo, pCtx); - return true; } @@ -2616,28 +2147,6 @@ static void top_function(SQLFunctionCtx *pCtx) { } } -static void top_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - STopBotInfo *pRes = getTopBotOutputInfo(pCtx); - assert(pRes->num >= 0); - - if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { - buildTopBotStruct(pRes, pCtx); - } - - SET_VAL(pCtx, 1, 1); - TSKEY ts = GET_TS_DATA(pCtx, index); - - do_top_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void top_func_merge(SQLFunctionCtx *pCtx) { STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx); 
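The hunks above replace the hand-rolled element shifting in do_top_function_add() and do_bottom_function_add() with calls to taosheapsort()/taosheapadjust(), so the candidate top-k/bottom-k rows are kept in a small heap instead of a sorted array. The sketch below illustrates that idea in plain, self-contained C under simplifying assumptions: it tracks only double values (no timestamps or tags), and the names (TopK, topkAdd) are hypothetical, not the TDengine heap API used in the patch.

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  double *val;   // heap storage; val[0] is the smallest retained value (the current k-th largest)
  int     size;  // number of elements currently held
  int     cap;   // k
} TopK;

// restore the min-heap property downwards from index i
static void siftDown(TopK *h, int i) {
  for (;;) {
    int l = 2 * i + 1, r = l + 1, m = i;
    if (l < h->size && h->val[l] < h->val[m]) m = l;
    if (r < h->size && h->val[r] < h->val[m]) m = r;
    if (m == i) return;
    double t = h->val[i]; h->val[i] = h->val[m]; h->val[m] = t;
    i = m;
  }
}

// O(log k) per incoming row instead of the O(k) shifting in the removed code
static void topkAdd(TopK *h, double v) {
  if (h->size < h->cap) {              // heap not full yet: append and sift up
    int i = h->size++;
    h->val[i] = v;
    while (i > 0 && h->val[(i - 1) / 2] > h->val[i]) {
      double t = h->val[i]; h->val[i] = h->val[(i - 1) / 2]; h->val[(i - 1) / 2] = t;
      i = (i - 1) / 2;
    }
  } else if (v > h->val[0]) {          // larger than the smallest retained value: replace the root
    h->val[0] = v;
    siftDown(h, 0);
  }
}

int main(void) {
  double data[] = {7, 3, 9, 1, 12, 5, 8};
  TopK h = {.val = malloc(3 * sizeof(double)), .size = 0, .cap = 3};
  for (int i = 0; i < 7; ++i) topkAdd(&h, data[i]);
  for (int i = 0; i < h.size; ++i) printf("%g ", h.val[i]);  // top-3, in heap order
  printf("\n");
  free(h.val);
  return 0;
}
```

For top-k the heap root holds the smallest retained value, so each row costs O(log k); bottom-k is the mirror image with the comparison flipped, which appears to be what the trailing 0/1 argument passed to taosheapsort()/taosheapadjust() in the patch selects.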
@@ -2695,27 +2204,6 @@ static void bottom_function(SQLFunctionCtx *pCtx) { } } -static void bottom_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - TSKEY ts = GET_TS_DATA(pCtx, index); - - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - STopBotInfo *pRes = getTopBotOutputInfo(pCtx); - - if ((void *)pRes->res[0] != (void *)((char *)pRes + sizeof(STopBotInfo) + POINTER_BYTES * pCtx->param[0].i64)) { - buildTopBotStruct(pRes, pCtx); - } - - SET_VAL(pCtx, 1, 1); - do_bottom_function_add(pRes, (int32_t)pCtx->param[0].i64, pData, ts, pCtx->inputType, &pCtx->tagInfo, NULL, 0); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void bottom_func_merge(SQLFunctionCtx *pCtx) { STopBotInfo *pInput = (STopBotInfo *)GET_INPUT_DATA_LIST(pCtx); @@ -2768,14 +2256,13 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) { } /////////////////////////////////////////////////////////////////////////////////////////////// -static bool percentile_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool percentile_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { return false; } // in the first round, get the min-max value of all involved data - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); + SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResultInfo); SET_DOUBLE_VAL(&pInfo->minval, DBL_MAX); SET_DOUBLE_VAL(&pInfo->maxval, -DBL_MAX); pInfo->numOfElems = 0; @@ -2860,55 +2347,11 @@ static void percentile_function(SQLFunctionCtx *pCtx) { continue; } - notNullElems += 1; - tMemBucketPut(pInfo->pMemBucket, data, 1); - } - - SET_VAL(pCtx, notNullElems, 1); - pResInfo->hasResult = DATA_SET_FLAG; -} - -static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = (SPercentileInfo *)GET_ROWCELL_INTERBUF(pResInfo); - - if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) { - pInfo->stage += 1; - - // all data are null, set it completed - if (pInfo->numOfElems == 0) { - pResInfo->complete = true; - - return; - } else { - pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval); - } - } - - if (pInfo->stage == 0) { - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - if (v < GET_DOUBLE_VAL(&pInfo->minval)) { - SET_DOUBLE_VAL(&pInfo->minval, v); - } - - if (v > GET_DOUBLE_VAL(&pInfo->maxval)) { - SET_DOUBLE_VAL(&pInfo->maxval, v); - } - - pInfo->numOfElems += 1; - return; + notNullElems += 1; + tMemBucketPut(pInfo->pMemBucket, data, 1); } - tMemBucketPut(pInfo->pMemBucket, pData, 1); - - SET_VAL(pCtx, 1, 1); + SET_VAL(pCtx, notNullElems, 1); pResInfo->hasResult = DATA_SET_FLAG; } @@ -2930,24 +2373,6 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) { doFinalizer(pCtx); } -static UNUSED_FUNC void percentile_next_step(SQLFunctionCtx *pCtx) { - SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); - SPercentileInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - if (pInfo->stage == 0) { - // all data are null, set it completed - if (pInfo->numOfElems == 0) { - pResInfo->complete = true; - } else { - pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, 
pCtx->inputType, pInfo->minval, pInfo->maxval); - } - - pInfo->stage += 1; - } else { - pResInfo->complete = true; - } -} - ////////////////////////////////////////////////////////////////////////////////// static void buildHistogramInfo(SAPercentileInfo* pInfo) { pInfo->pHisto = (SHistogramInfo*) ((char*) pInfo + sizeof(SAPercentileInfo)); @@ -2968,8 +2393,8 @@ static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) { return pInfo; } -static bool apercentile_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool apercentile_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { return false; } @@ -3012,24 +2437,6 @@ static void apercentile_function(SQLFunctionCtx *pCtx) { } } -static void apercentile_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); - SAPercentileInfo *pInfo = getAPerctInfo(pCtx); - - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - tHistogramAdd(&pInfo->pHisto, v); - - SET_VAL(pCtx, 1, 1); - pResInfo->hasResult = DATA_SET_FLAG; -} - static void apercentile_func_merge(SQLFunctionCtx *pCtx) { SAPercentileInfo *pInput = (SAPercentileInfo *)GET_INPUT_DATA_LIST(pCtx); @@ -3096,12 +2503,11 @@ static void apercentile_finalizer(SQLFunctionCtx *pCtx) { } ///////////////////////////////////////////////////////////////////////////////// -static bool leastsquares_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool leastsquares_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; } - - SResultRowCellInfo * pResInfo = GET_RES_INFO(pCtx); + SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); // 2*3 matrix @@ -3213,60 +2619,6 @@ static void leastsquares_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, numOfElem, 1); } -static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SLeastsquaresInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - double(*param)[3] = pInfo->mat; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - int32_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_TINYINT: { - int8_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_SMALLINT: { - int16_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_BIGINT: { - int64_t *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_FLOAT: { - float *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - case TSDB_DATA_TYPE_DOUBLE: { - double *p = pData; - LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); - break; - } - default: - qError("error data type in leastsquare function:%d", pCtx->inputType); - }; - - SET_VAL(pCtx, 1, 1); - pInfo->num += 1; - - if (pInfo->num > 0) { - pResInfo->hasResult = DATA_SET_FLAG; - } -} - static void leastsquares_finalizer(SQLFunctionCtx *pCtx) { // no data in query SResultRowCellInfo * pResInfo = 
GET_RES_INFO(pCtx); @@ -3304,25 +2656,23 @@ static void date_col_output_function(SQLFunctionCtx *pCtx) { *(int64_t *)(pCtx->pOutput) = pCtx->startTs; } -static FORCE_INLINE void date_col_output_function_f(SQLFunctionCtx *pCtx, int32_t index) { - date_col_output_function(pCtx); -} - static void col_project_function(SQLFunctionCtx *pCtx) { // the number of output rows should not affect the final number of rows, so set it to be 0 if (pCtx->numOfParams == 2) { return; } + + // only one row is required. if (pCtx->param[0].i64 == 1) { SET_VAL(pCtx, pCtx->size, 1); } else { INC_INIT_VAL(pCtx, pCtx->size); } - char *pData = GET_INPUT_DATA_LIST(pCtx); if (pCtx->order == TSDB_ORDER_ASC) { - memcpy(pCtx->pOutput, pData, (size_t) pCtx->size * pCtx->inputBytes); + int32_t numOfRows = (pCtx->param[0].i64 == 1)? 1:pCtx->size; + memcpy(pCtx->pOutput, pData, (size_t) numOfRows * pCtx->inputBytes); } else { for(int32_t i = 0; i < pCtx->size; ++i) { memcpy(pCtx->pOutput + (pCtx->size - 1 - i) * pCtx->inputBytes, pData + i * pCtx->inputBytes, @@ -3331,22 +2681,6 @@ static void col_project_function(SQLFunctionCtx *pCtx) { } } -static void col_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - if (pCtx->numOfParams == 2) { // the number of output rows should not affect the final number of rows, so set it to be 0 - return; - } - - // only one output - if (pCtx->param[0].i64 == 1 && pResInfo->numOfRes >= 1) { - return; - } - - INC_INIT_VAL(pCtx, 1); - char *pData = GET_INPUT_DATA(pCtx, index); - memcpy(pCtx->pOutput, pData, pCtx->inputBytes); -} - /** * only used for tag projection query in select clause * @param pCtx @@ -3368,13 +2702,6 @@ static void tag_project_function(SQLFunctionCtx *pCtx) { } } -static void tag_project_function_f(SQLFunctionCtx *pCtx, int32_t index) { - INC_INIT_VAL(pCtx, 1); - - tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->tag.nType, true); - pCtx->pOutput += pCtx->outputBytes; -} - /** * used in group by clause. when applying group by tags, the tags value is * assign by using tag function. 
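Setting the qAggMain.c cleanup aside for a moment: the UDF entry points declared in the new qUdf.h earlier in this patch (udfNormalFunc, udfInitFunc and friends) are easier to read next to a concrete caller-side example. The sketch below is a hypothetical scalar UDF written against those typedefs; the add_one name, the INT-only handling, and the companion add_one_init symbol are illustrative assumptions, not something this patch defines.

```c
/*
 * Hypothetical scalar UDF matching the udfNormalFunc/udfInitFunc typedefs
 * declared in qUdf.h (added by this patch). Null handling and non-INT input
 * types are deliberately ignored to keep the sketch short.
 */
#include "qUdf.h"   // the header introduced above; defines SUdfInit

void add_one(char* data, int16_t itype, int16_t iBytes, int32_t numOfRows, int64_t* ts,
             char* dataOutput, char* interBuf, char* tsOutput, int32_t* numOfOutput,
             int16_t oType, int16_t oBytes, SUdfInit* buf) {
  (void)itype; (void)ts; (void)interBuf; (void)tsOutput; (void)oType; (void)oBytes; (void)buf;  // unused here

  if (iBytes != (int16_t)sizeof(int32_t)) {  // this sketch assumes a 4-byte INT column
    *numOfOutput = 0;
    return;
  }

  const int32_t* in  = (const int32_t*)data;
  int32_t*       out = (int32_t*)dataOutput;
  for (int32_t i = 0; i < numOfRows; ++i) {
    out[i] = in[i] + 1;                      // one output row per input row
  }
  *numOfOutput = numOfRows;
}

int32_t add_one_init(SUdfInit* buf) {
  buf->maybe_null = 0;                       // result is never NULL for this sketch
  return 0;                                  // 0 is assumed to mean success
}
```

Such a library would presumably be registered through the CREATE FUNCTION grammar added to sql.y in this patch, along the lines of CREATE FUNCTION add_one AS '/full/path/add_one.so' OUTPUTTYPE INT; the exact literal form of the path argument is not spelled out by the grammar fragment shown here.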
@@ -3393,11 +2720,6 @@ static void tag_function(SQLFunctionCtx *pCtx) { } } -static void tag_function_f(SQLFunctionCtx *pCtx, int32_t index) { - SET_VAL(pCtx, 1, 1); - tVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->outputType, true); -} - static void copy_function(SQLFunctionCtx *pCtx) { SET_VAL(pCtx, pCtx->size, 1); @@ -3409,8 +2731,8 @@ enum { INITIAL_VALUE_NOT_ASSIGNED = 0, }; -static bool diff_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool diff_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; } @@ -3419,14 +2741,13 @@ static bool diff_function_setup(SQLFunctionCtx *pCtx) { return false; } -static bool deriv_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool deriv_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResultInfo) { + if (!function_setup(pCtx, pResultInfo)) { return false; } // diff function require the value is set to -1 - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo); + SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResultInfo); pDerivInfo->ignoreNegative = pCtx->param[1].i64; pDerivInfo->prevTs = -1; @@ -3613,16 +2934,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) { qError("error input type"); } - // initial value is not set yet, all data block are null - if (!pDerivInfo->valueSet || notNullElems <= 0) { - /* - * 1. current block and blocks before are full of null - * 2. current block may be null value - */ - assert(pCtx->hasNull); - } else { - GET_RES_INFO(pCtx)->numOfRes += notNullElems; - } + GET_RES_INFO(pCtx)->numOfRes += notNullElems; } #define DIFF_IMPL(ctx, d, type) \ @@ -3802,61 +3114,6 @@ static void diff_function(SQLFunctionCtx *pCtx) { } } -static void diff_function_f(SQLFunctionCtx *pCtx, int32_t index) { - char *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // the output start from the second source element - if (pCtx->param[1].nType != INITIAL_VALUE_NOT_ASSIGNED) { // initial value is set - GET_RES_INFO(pCtx)->numOfRes += 1; - } - - int32_t step = 1/*GET_FORWARD_DIRECTION_FACTOR(pCtx->order)*/; - - switch (pCtx->inputType) { - case TSDB_DATA_TYPE_INT: { - if (pCtx->param[1].nType == INITIAL_VALUE_NOT_ASSIGNED) { // initial value is not set yet - pCtx->param[1].nType = pCtx->inputType; - pCtx->param[1].i64 = *(int32_t *)pData; - } else { - *(int32_t *)pCtx->pOutput = *(int32_t *)pData - (int32_t)pCtx->param[1].i64; - pCtx->param[1].i64 = *(int32_t *)pData; - *(int64_t *)pCtx->ptsOutputBuf = GET_TS_DATA(pCtx, index); - } - break; - }; - case TSDB_DATA_TYPE_BIGINT: { - DIFF_IMPL(pCtx, pData, int64_t); - break; - }; - case TSDB_DATA_TYPE_DOUBLE: { - DIFF_IMPL(pCtx, pData, double); - break; - }; - case TSDB_DATA_TYPE_FLOAT: { - DIFF_IMPL(pCtx, pData, float); - break; - }; - case TSDB_DATA_TYPE_SMALLINT: { - DIFF_IMPL(pCtx, pData, int16_t); - break; - }; - case TSDB_DATA_TYPE_TINYINT: { - DIFF_IMPL(pCtx, pData, int8_t); - break; - }; - default: - qError("error input type"); - } - - if (GET_RES_INFO(pCtx)->numOfRes > 0) { - pCtx->pOutput += pCtx->outputBytes * step; - pCtx->ptsOutputBuf = (char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * step; - } -} - char *getArithColumnData(void *param, const char* name, int32_t colId) { SArithmeticSupport *pSupport = (SArithmeticSupport *)param; @@ -3879,16 +3136,6 @@ static void arithmetic_function(SQLFunctionCtx *pCtx) { 
arithmeticTreeTraverse(sas->pExprInfo->pExpr, pCtx->size, pCtx->pOutput, sas, pCtx->order, getArithColumnData); } -static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { - INC_INIT_VAL(pCtx, 1); - SArithmeticSupport *sas = (SArithmeticSupport *)pCtx->param[1].pz; - - sas->offset = index; - arithmeticTreeTraverse(sas->pExprInfo->pExpr, 1, pCtx->pOutput, sas, pCtx->order, getArithColumnData); - - pCtx->pOutput += pCtx->outputBytes; -} - #define LIST_MINMAX_N(ctx, minOutput, maxOutput, elemCnt, data, type, tsdbType, numOfNotNullElem) \ { \ type *inputData = (type *)data; \ @@ -3907,12 +3154,12 @@ static void arithmetic_function_f(SQLFunctionCtx *pCtx, int32_t index) { } ///////////////////////////////////////////////////////////////////////////////// -static bool spread_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool spread_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; } - SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); + SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); // this is the server-side setup function in client-side, the secondary merge do not need this procedure if (pCtx->currentStage == MERGE_STAGE) { @@ -4007,49 +3254,6 @@ static void spread_function(SQLFunctionCtx *pCtx) { } } -static void spread_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SET_VAL(pCtx, 1, 1); - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - double val = 0.0; - if (pCtx->inputType == TSDB_DATA_TYPE_TINYINT) { - val = GET_INT8_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_SMALLINT) { - val = GET_INT16_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_INT) { - val = GET_INT32_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_BIGINT || pCtx->inputType == TSDB_DATA_TYPE_TIMESTAMP) { - val = (double)(GET_INT64_VAL(pData)); - } else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE) { - val = GET_DOUBLE_VAL(pData); - } else if (pCtx->inputType == TSDB_DATA_TYPE_FLOAT) { - val = GET_FLOAT_VAL(pData); - } - - // keep the result data in output buffer, not in the intermediate buffer - if (val > pInfo->max) { - pInfo->max = val; - } - - if (val < pInfo->min) { - pInfo->min = val; - } - - pResInfo->hasResult = DATA_SET_FLAG; - pInfo->hasResult = DATA_SET_FLAG; - - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SSpreadInfo)); - } -} - /* * here we set the result value back to the intermediate buffer, to apply the finalize the function * the final result is generated in spread_function_finalizer @@ -4109,12 +3313,10 @@ void spread_function_finalizer(SQLFunctionCtx *pCtx) { * param[2]: end time * @param pCtx */ -static bool twa_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool twa_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); pInfo->p.key = INT64_MIN; @@ -4402,26 +3604,6 @@ static void twa_function(SQLFunctionCtx *pCtx) { } } -static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - 
int32_t notNullElems = twa_function_impl(pCtx, index, 1); - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - - SET_VAL(pCtx, notNullElems, 1); - - if (notNullElems > 0) { - pResInfo->hasResult = DATA_SET_FLAG; - } - - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(STwaInfo)); - } -} - /* * To copy the input to interResBuf to avoid the input buffer space be over writen * by next input data. The TWA function only applies to each table, so no merge procedure @@ -4566,14 +3748,12 @@ static void interp_function(SQLFunctionCtx *pCtx) { } } -static bool ts_comp_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool ts_comp_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; // not initialized since it has been initialized } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - pInfo->pTSBuf = tsBufCreate(false, pCtx->order); pInfo->pTSBuf->tsOrder = pCtx->order; return true; @@ -4599,23 +3779,6 @@ static void ts_comp_function(SQLFunctionCtx *pCtx) { pResInfo->hasResult = DATA_SET_FLAG; } -static void ts_comp_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - STSCompInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - - STSBuf *pTSbuf = pInfo->pTSBuf; - - tsBufAppend(pTSbuf, (int32_t)pCtx->param[0].i64, &pCtx->tag, pData, TSDB_KEYSIZE); - SET_VAL(pCtx, pCtx->size, 1); - - pResInfo->hasResult = DATA_SET_FLAG; -} - static void ts_comp_finalize(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); @@ -4673,13 +3836,11 @@ static double do_calc_rate(const SRateInfo* pRateInfo, double tickPerSec) { return (duration > 0)? 
((double)diff) / (duration/tickPerSec):0.0; } -static bool rate_function_setup(SQLFunctionCtx *pCtx) { - if (!function_setup(pCtx)) { +static bool rate_function_setup(SQLFunctionCtx *pCtx, SResultRowCellInfo* pResInfo) { + if (!function_setup(pCtx, pResInfo)) { return false; } - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); pInfo->correctionValue = 0; pInfo->firstKey = INT64_MIN; @@ -4745,46 +3906,6 @@ static void rate_function(SQLFunctionCtx *pCtx) { } } -static void rate_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // NOTE: keep the intermediate result into the interResultBuf - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); - TSKEY *primaryKey = GET_TS_LIST(pCtx); - - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - if ((INT64_MIN == pRateInfo->firstValue) || (INT64_MIN == pRateInfo->firstKey)) { - pRateInfo->firstValue = v; - pRateInfo->firstKey = primaryKey[index]; - } - - if (INT64_MIN == pRateInfo->lastValue) { - pRateInfo->lastValue = v; - } else if (v < pRateInfo->lastValue) { - pRateInfo->correctionValue += pRateInfo->lastValue; - } - - pRateInfo->lastValue = v; - pRateInfo->lastKey = primaryKey[index]; - - SET_VAL(pCtx, 1, 1); - - // set has result flag - pRateInfo->hasResult = DATA_SET_FLAG; - pResInfo->hasResult = DATA_SET_FLAG; - - // keep the data into the final output buffer for super table query since this execution may be the last one - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); - } -} - static void rate_func_copy(SQLFunctionCtx *pCtx) { assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY); @@ -4802,7 +3923,7 @@ static void rate_finalizer(SQLFunctionCtx *pCtx) { return; } - *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, TSDB_TICK_PER_SECOND(pCtx->param[0].i64)); + *(double*) pCtx->pOutput = do_calc_rate(pRateInfo, (double) TSDB_TICK_PER_SECOND(pCtx->param[0].i64)); // cannot set the numOfIteratedElems again since it is set during previous iteration pResInfo->numOfRes = 1; @@ -4817,7 +3938,7 @@ static void irate_function(SQLFunctionCtx *pCtx) { int32_t notNullElems = 0; SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); TSKEY *primaryKey = GET_TS_LIST(pCtx); - + for (int32_t i = pCtx->size - 1; i >= 0; --i) { char *pData = GET_INPUT_DATA(pCtx, i); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { @@ -4855,46 +3976,13 @@ static void irate_function(SQLFunctionCtx *pCtx) { } } -static void irate_function_f(SQLFunctionCtx *pCtx, int32_t index) { - void *pData = GET_INPUT_DATA(pCtx, index); - if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { - return; - } - - // NOTE: keep the intermediate result into the interResultBuf - SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - SRateInfo *pRateInfo = (SRateInfo *)GET_ROWCELL_INTERBUF(pResInfo); - TSKEY *primaryKey = GET_TS_LIST(pCtx); - - double v = 0; - GET_TYPED_DATA(v, double, pCtx->inputType, pData); - - pRateInfo->firstKey = pRateInfo->lastKey; - pRateInfo->firstValue = pRateInfo->lastValue; - - pRateInfo->lastValue = v; - pRateInfo->lastKey = primaryKey[index]; - -// qDebug("====%p irate_function_f() index:%d lastValue:%" PRId64 " lastKey:%" PRId64 " firstValue:%" PRId64 " firstKey:%" PRId64, pCtx, index, pRateInfo->lastValue, pRateInfo->lastKey, pRateInfo->firstValue , 
pRateInfo->firstKey); - SET_VAL(pCtx, 1, 1); - - // set has result flag - pRateInfo->hasResult = DATA_SET_FLAG; - pResInfo->hasResult = DATA_SET_FLAG; - - // keep the data into the final output buffer for super table query since this execution may be the last one - if (pCtx->stableQuery) { - memcpy(pCtx->pOutput, GET_ROWCELL_INTERBUF(pResInfo), sizeof(SRateInfo)); - } -} - void blockInfo_func(SQLFunctionCtx* pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo); int32_t len = *(int32_t*) pCtx->pInput; blockDistInfoFromBinary((char*)pCtx->pInput + sizeof(int32_t), len, pDist); - pDist->rowSize = (int16_t) pCtx->param[0].i64; + pDist->rowSize = (uint16_t)pCtx->param[0].i64; memcpy(pCtx->pOutput, pCtx->pInput, sizeof(int32_t) + len); @@ -5041,7 +4129,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STableBlockDist* pDist = (STableBlockDist*) GET_ROWCELL_INTERBUF(pResInfo); - pDist->rowSize = (int16_t)pCtx->param[0].i64; + pDist->rowSize = (uint16_t)pCtx->param[0].i64; generateBlockDistResult(pDist, pCtx->pOutput); // cannot set the numOfIteratedElems again since it is set during previous iteration @@ -5056,8 +4144,7 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) { * function compatible list. * tag and ts are not involved in the compatibility check * - * 1. functions that are not simultaneously present with any other functions. e.g., - * diff/ts_z/top/bottom + * 1. functions that are not simultaneously present with any other functions. e.g., diff/ts_z/top/bottom * 2. functions that are only allowed to be present only with same functions. e.g., last_row, interp * 3. functions that are allowed to be present with other functions. * e.g., count/sum/avg/min/max/stddev/percentile/apercentile/first/last... 
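The comment above describes the compatibility classes encoded in the `functionCompatList` array that follows: functions sharing a class code may be selected together, while a code of -1 (as used for diff in the visible part of the array) marks functions that must appear alone. The actual enforcement lives in TDengine's SQL parser and may differ in detail, so the helper below is only a hedged sketch of how such a code table could be consumed; `functionsCompatible` and its parameters are illustrative names, not TDengine APIs:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hedged illustration: validate a select list against a per-function
 * compatibility code table. Every selected function must share the same
 * class code, and a negative code flags a function that has to appear on
 * its own (e.g. diff in the array below). Not the real TDengine check. */
static bool functionsCompatible(const int32_t *compatList,
                                const int32_t *funcIds, int32_t numOfFuncs) {
  if (numOfFuncs <= 1) {
    return true;
  }

  int32_t code = compatList[funcIds[0]];
  for (int32_t i = 1; i < numOfFuncs; ++i) {
    int32_t c = compatList[funcIds[i]];
    if (c == -1 || code == -1) {  /* exclusive functions cannot be combined */
      return false;
    }
    if (c != code) {              /* functions from different classes mix badly */
      return false;
    }
  }

  return true;
}
```

Any mismatch between class codes, or any occurrence of an exclusive (-1) function alongside others, rejects the combination.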
@@ -5071,7 +4158,7 @@ int32_t functionCompatList[] = { // tag, colprj, tagprj, arithmetic, diff, first_dist, last_dist, stddev_dst, interp rate irate 1, 1, 1, 1, -1, 1, 1, 1, 5, 1, 1, // tid_tag, blk_info - 6, 7 + 6, 7 }; SAggFunctionInfo aAggs[] = {{ @@ -5082,7 +4169,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, count_function, - count_function_f, doFinalizer, count_func_merge, countRequired, @@ -5095,7 +4181,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, sum_function, - sum_function_f, function_finalizer, sum_func_merge, statisRequired, @@ -5108,7 +4193,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, avg_function, - avg_function_f, avg_finalizer, avg_func_merge, statisRequired, @@ -5121,7 +4205,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, min_func_setup, min_function, - min_function_f, function_finalizer, min_func_merge, statisRequired, @@ -5134,7 +4217,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, max_func_setup, max_function, - max_function_f, function_finalizer, max_func_merge, statisRequired, @@ -5147,7 +4229,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, function_setup, stddev_function, - stddev_function_f, stddev_finalizer, noop1, dataBlockRequired, @@ -5160,7 +4241,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, percentile_function_setup, percentile_function, - percentile_function_f, percentile_finalizer, noop1, dataBlockRequired, @@ -5173,7 +4253,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE, apercentile_function_setup, apercentile_function, - apercentile_function_f, apercentile_finalizer, apercentile_func_merge, dataBlockRequired, @@ -5186,7 +4265,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, function_setup, first_function, - first_function_f, function_finalizer, noop1, firstFuncRequired, @@ -5199,7 +4277,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_SELECTIVITY, function_setup, last_function, - last_function_f, function_finalizer, noop1, lastFuncRequired, @@ -5213,7 +4290,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, last_row_function, - noop2, last_row_finalizer, last_dist_func_merge, dataBlockRequired, @@ -5227,7 +4303,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SELECTIVITY, top_bottom_function_setup, top_function, - top_function_f, top_bottom_func_finalizer, top_func_merge, dataBlockRequired, @@ -5241,7 +4316,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SELECTIVITY, top_bottom_function_setup, bottom_function, - bottom_function_f, top_bottom_func_finalizer, bottom_func_merge, dataBlockRequired, @@ -5254,7 +4328,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, spread_function_setup, spread_function, - spread_function_f, spread_function_finalizer, spread_func_merge, countRequired, @@ -5267,7 +4340,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, twa_function_setup, twa_function, - twa_function_f, twa_function_finalizer, twa_function_copy, dataBlockRequired, @@ -5280,7 +4352,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STREAM | TSDB_FUNCSTATE_OF, leastsquares_function_setup, leastsquares_function, - leastsquares_function_f, leastsquares_finalizer, noop1, dataBlockRequired, @@ -5293,7 +4364,6 
@@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, date_col_output_function, - date_col_output_function_f, doFinalizer, copy_function, noDataRequired, @@ -5306,7 +4376,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, function_setup, noop1, - noop2, doFinalizer, copy_function, dataBlockRequired, @@ -5319,7 +4388,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, tag_function, - noop2, doFinalizer, copy_function, noDataRequired, @@ -5332,7 +4400,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_NEED_TS, ts_comp_function_setup, ts_comp_function, - ts_comp_function_f, ts_comp_finalize, copy_function, dataBlockRequired, @@ -5345,7 +4412,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO, function_setup, tag_function, - tag_function_f, doFinalizer, copy_function, noDataRequired, @@ -5358,7 +4424,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_MO | TSDB_FUNCSTATE_NEED_TS, function_setup, col_project_function, - col_project_function_f, doFinalizer, copy_function, dataBlockRequired, @@ -5371,7 +4436,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_MO, function_setup, tag_project_function, - tag_project_function_f, doFinalizer, copy_function, noDataRequired, @@ -5384,7 +4448,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS, function_setup, arithmetic_function, - arithmetic_function_f, doFinalizer, copy_function, dataBlockRequired, @@ -5397,7 +4460,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, diff_function_setup, diff_function, - diff_function_f, doFinalizer, noop1, dataBlockRequired, @@ -5411,7 +4473,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, first_dist_function, - first_dist_function_f, function_finalizer, first_dist_func_merge, firstDistFuncRequired, @@ -5424,7 +4485,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, first_last_function_setup, last_dist_function, - last_dist_function_f, function_finalizer, last_dist_func_merge, lastDistFuncRequired, @@ -5437,7 +4497,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE, function_setup, stddev_dst_function, - stddev_dst_function_f, stddev_dst_finalizer, stddev_dst_merge, dataBlockRequired, @@ -5450,7 +4509,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_OF | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS , function_setup, interp_function, - do_sum_f, // todo filter handle doFinalizer, copy_function, dataBlockRequired, @@ -5463,7 +4521,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, rate_function_setup, rate_function, - rate_function_f, rate_finalizer, rate_func_copy, dataBlockRequired, @@ -5476,7 +4533,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS, rate_function_setup, irate_function, - irate_function_f, rate_finalizer, rate_func_copy, dataBlockRequired, @@ -5489,7 +4545,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE, function_setup, noop1, - noop2, noop1, noop1, dataBlockRequired, @@ -5501,7 +4556,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY, deriv_function_setup, deriv_function, - noop2, doFinalizer, noop1, dataBlockRequired, @@ 
-5514,7 +4568,6 @@ SAggFunctionInfo aAggs[] = {{ TSDB_FUNCSTATE_SO | TSDB_FUNCSTATE_STABLE, function_setup, blockInfo_func, - noop2, blockinfo_func_finalizer, block_func_merge, dataBlockRequired, diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 95f9a6b9557cd020d5f5bc2695fed3a747e5c103..199599468f8ee112784b4ed809eddf1a5547ba86 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -16,6 +16,7 @@ #include "qFill.h" #include "taosmsg.h" #include "tglobal.h" +#include "talgo.h" #include "exception.h" #include "hash.h" @@ -26,11 +27,13 @@ #include "queryLog.h" #include "tlosertree.h" #include "ttype.h" +#include "tcompare.h" #include "tscompression.h" +#include "qScript.h" #define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) #define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN) -#define IS_REPEAT_SCAN(runtime) ((runtime)->scanFlag == REPEAT_SCAN) +#define IS_REPEAT_SCAN(runtime) ((runtime)->scanFlag == REPEAT_SCAN) #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN) #define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN) @@ -94,12 +97,47 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) +#define TSKEY_MAX_ADD(a,b) \ +do { \ + if (a < 0) { a = a + b; break;} \ + if (sizeof(a) == sizeof(int32_t)) { \ + if((b) > 0 && ((b) >= INT32_MAX - (a))){\ + a = INT32_MAX; \ + } else { \ + a = a + b; \ + } \ + } else { \ + if((b) > 0 && ((b) >= INT64_MAX - (a))){\ + a = INT64_MAX; \ + } else { \ + a = a + b; \ + } \ + } \ +} while(0) + +#define TSKEY_MIN_SUB(a,b) \ +do { \ + if (a >= 0) { a = a + b; break;} \ + if (sizeof(a) == sizeof(int32_t)){ \ + if((b) < 0 && ((b) <= INT32_MIN - (a))){\ + a = INT32_MIN; \ + } else { \ + a = a + b; \ + } \ + } else { \ + if((b) < 0 && ((b) <= INT64_MIN-(a))) {\ + a = INT64_MIN; \ + } else { \ + a = a + b; \ + } \ + } \ +} while (0) + uint64_t queryHandleId = 0; int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; } - int64_t genQueryId(void) { int64_t uid = 0; int64_t did = tsDnodeId; @@ -161,7 +199,7 @@ static void setResultOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResul int32_t numOfCols, int32_t* rowCellInfoOffset); void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset); -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId); +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx); static void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColIndex* pColIndex); @@ -177,14 +215,13 @@ static STsdbQueryCond createTsdbQueryCond(SQueryAttr* pQueryAttr, STimeWindow* w static STableIdInfo createTableIdInfo(STableQueryInfo* pTableQueryInfo); static void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInfo* pDownstream); -static void* doDestroyFilterInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols); static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr); static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); -static void destroyArithOperatorInfo(void* param, 
int32_t numOfOutput); +static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); @@ -207,7 +244,70 @@ static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowIn SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex); -// setup the output buffer for each operator +SArray* getOrderCheckColumns(SQueryAttr* pQuery); + + +typedef struct SRowCompSupporter { + SQueryRuntimeEnv *pRuntimeEnv; + int16_t dataOffset; + __compar_fn_t comFunc; +} SRowCompSupporter; + +static int compareRowData(const void *a, const void *b, const void *userData) { + const SResultRow *pRow1 = (const SResultRow *)a; + const SResultRow *pRow2 = (const SResultRow *)b; + + SRowCompSupporter *supporter = (SRowCompSupporter *)userData; + SQueryRuntimeEnv* pRuntimeEnv = supporter->pRuntimeEnv; + + tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pRow1->pageId); + tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pRow2->pageId); + + int16_t offset = supporter->dataOffset; + char *in1 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page1, pRow1->offset, offset); + char *in2 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page2, pRow2->offset, offset); + + return (in1 != NULL && in2 != NULL) ? supporter->comFunc(in1, in2) : 0; +} + +static void sortGroupResByOrderList(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, SSDataBlock* pDataBlock) { + SArray *columnOrderList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr); + size_t size = taosArrayGetSize(columnOrderList); + taosArrayDestroy(columnOrderList); + + if (size <= 0) { + return; + } + + int32_t orderId = pRuntimeEnv->pQueryAttr->order.orderColId; + if (orderId <= 0) { + return; + } + + bool found = false; + int16_t dataOffset = 0; + + for (int32_t j = 0; j < pDataBlock->info.numOfCols; ++j) { + SColumnInfoData* pColInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock->pDataBlock, j); + if (orderId == j) { + found = true; + break; + } + + dataOffset += pColInfoData->info.bytes; + } + + if (found == false) { + return; + } + + int16_t type = pRuntimeEnv->pQueryAttr->pExpr1[orderId].base.resType; + + SRowCompSupporter support = {.pRuntimeEnv = pRuntimeEnv, .dataOffset = dataOffset, .comFunc = getComparFunc(type, 0)}; + taosArraySortPWithExt(pGroupResInfo->pRows, compareRowData, &support); +} + +//setup the output buffer for each operator SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows) { const static int32_t minSize = 8; @@ -309,7 +409,7 @@ static bool isProjQuery(SQueryAttr *pQueryAttr) { return true; } -static bool hasNullRv(SColIndex* pColIndex, SDataStatis *pStatis) { +static bool hasNull(SColIndex* pColIndex, SDataStatis *pStatis) { if (TSDB_COL_IS_TAG(pColIndex->flag) || TSDB_COL_IS_UD_COL(pColIndex->flag) || pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { return false; } @@ -347,10 +447,10 @@ static void prepareResultListBuffer(SResultRowInfo* pResultRowInfo, SQueryRuntim pResultRowInfo->capacity = (int32_t)newCapacity; } -static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData, - int16_t bytes, bool masterscan, uint64_t uid) { +static SResultRow* doSetResultOutBufByKey(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, int64_t tid, + 
char* pData, int16_t bytes, bool masterscan, uint64_t tableGroupId) { bool existed = false; - SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid); + SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tableGroupId); SResultRow **p1 = (SResultRow **)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); @@ -362,16 +462,26 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes } if (p1 != NULL) { - for(int32_t i = pResultRowInfo->size - 1; i >= 0; --i) { - if (pResultRowInfo->pResult[i] == (*p1)) { - pResultRowInfo->curIndex = i; + if (pResultRowInfo->size == 0) { + existed = false; + assert(pResultRowInfo->curPos == -1); + } else if (pResultRowInfo->size == 1) { + existed = (pResultRowInfo->pResult[0] == (*p1)); + pResultRowInfo->curPos = 0; + } else { // check if current pResultRowInfo contains the existed pResultRow + SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo); + int64_t* index = taosHashGet(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes)); + if (index != NULL) { + pResultRowInfo->curPos = (int32_t) *index; existed = true; - break; + } else { + existed = false; } } } } else { - if (p1 != NULL) { // group by column query + // In case of group by column query, the required SResultRow object must be existed in the pResultRowInfo object. + if (p1 != NULL) { return *p1; } } @@ -393,8 +503,12 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes pResult = *p1; } - pResultRowInfo->pResult[pResultRowInfo->size] = pResult; - pResultRowInfo->curIndex = pResultRowInfo->size++; + pResultRowInfo->curPos = pResultRowInfo->size; + pResultRowInfo->pResult[pResultRowInfo->size++] = pResult; + + int64_t index = pResultRowInfo->curPos; + SET_RES_EXT_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, tid, pResultRowInfo); + taosHashPut(pRuntimeEnv->pResultRowListSet, pRuntimeEnv->keyBuf, GET_RES_EXT_WINDOW_KEY_LEN(bytes), &index, POINTER_BYTES); } // too many time window in query @@ -402,7 +516,7 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } - return getResultRow(pResultRowInfo, pResultRowInfo->curIndex); + return pResultRowInfo->pResult[pResultRowInfo->curPos]; } static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWindow* w) { @@ -433,13 +547,8 @@ static void getInitialStartTimeWindow(SQueryAttr* pQueryAttr, TSKEY ts, STimeWin static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t ts, SQueryAttr *pQueryAttr) { STimeWindow w = {0}; - if (pResultRowInfo->curIndex == -1) { // the first window, from the previous stored value - if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) { - getInitialStartTimeWindow(pQueryAttr, ts, &w); - pResultRowInfo->prevSKey = w.skey; - } else { - w.skey = pResultRowInfo->prevSKey; - } + if (pResultRowInfo->curPos == -1) { // the first window, from the previous stored value + getInitialStartTimeWindow(pQueryAttr, ts, &w); if (pQueryAttr->interval.intervalUnit == 'n' || pQueryAttr->interval.intervalUnit == 'y') { w.ekey = taosTimeAdd(w.skey, pQueryAttr->interval.interval, pQueryAttr->interval.intervalUnit, pQueryAttr->precision) - 1; @@ -447,9 +556,7 @@ static STimeWindow getActiveTimeWindow(SResultRowInfo * pResultRowInfo, int64_t w.ekey = w.skey + pQueryAttr->interval.interval - 1; } } else { - int32_t slot = curTimeWindowIndex(pResultRowInfo); - 
SResultRow* pWindowRes = getResultRow(pResultRowInfo, slot); - w = pWindowRes->win; + w = getResultRow(pResultRowInfo, pResultRowInfo->curPos)->win; } if (w.skey > ts || w.ekey < ts) { @@ -529,13 +636,13 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf return 0; } -static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, STimeWindow *win, - bool masterscan, SResultRow **pResult, int64_t groupId, SQLFunctionCtx* pCtx, +static int32_t setResultOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int64_t tid, STimeWindow *win, + bool masterscan, SResultRow **pResult, int64_t tableGroupId, SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowCellInfoOffset) { assert(win->skey <= win->ekey); SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; - SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, groupId); + SResultRow *pResultRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&win->skey, TSDB_KEYSIZE, masterscan, tableGroupId); if (pResultRow == NULL) { *pResult = NULL; return TSDB_CODE_SUCCESS; @@ -543,7 +650,7 @@ static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRow // not assign result buffer yet, add new result buffer if (pResultRow->pageId == -1) { - int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) groupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize); + int32_t ret = addNewWindowResultBuf(pResultRow, pResultBuf, (int32_t) tableGroupId, pRuntimeEnv->pQueryAttr->intermediateResultRowSize); if (ret != TSDB_CODE_SUCCESS) { return -1; } @@ -634,7 +741,13 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey, // all result rows are closed, set the last one to be the skey if (skey == TSKEY_INITIAL_VAL) { - pResultRowInfo->curIndex = pResultRowInfo->size - 1; + if (pResultRowInfo->size == 0) { +// assert(pResultRowInfo->current == NULL); + assert(pResultRowInfo->curPos == -1); + pResultRowInfo->curPos = -1; + } else { + pResultRowInfo->curPos = pResultRowInfo->size - 1; + } } else { for (i = pResultRowInfo->size - 1; i >= 0; --i) { @@ -645,12 +758,10 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey, } if (i == pResultRowInfo->size - 1) { - pResultRowInfo->curIndex = i; + pResultRowInfo->curPos = i; } else { - pResultRowInfo->curIndex = i + 1; // current not closed result object + pResultRowInfo->curPos = i + 1; // current not closed result object } - - pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey; } } @@ -658,7 +769,7 @@ static void updateResultRowInfoActiveIndex(SResultRowInfo* pResultRowInfo, SQuer bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); if ((lastKey > pQueryAttr->window.ekey && ascQuery) || (lastKey < pQueryAttr->window.ekey && (!ascQuery))) { closeAllResultRows(pResultRowInfo); - pResultRowInfo->curIndex = pResultRowInfo->size - 1; + pResultRowInfo->curPos = pResultRowInfo->size - 1; } else { int32_t step = ascQuery ? 
1 : -1; doUpdateResultRowIndex(pResultRowInfo, lastKey - step, ascQuery, pQueryAttr->timeWindowInterpo); @@ -705,15 +816,89 @@ static int32_t getNumOfRowsInTimeWindow(SQueryRuntimeEnv* pRuntimeEnv, SDataBloc return num; } +void doInvokeUdf(SUdfInfo* pUdfInfo, SQLFunctionCtx *pCtx, int32_t idx, int32_t type) { + int32_t output = 0; + + if (pUdfInfo == NULL || pUdfInfo->funcs[type] == NULL) { + qError("empty udf function, type:%d", type); + return; + } + + qDebug("invoke udf function:%s,%p", pUdfInfo->name, pUdfInfo->funcs[type]); + + switch (type) { + case TSDB_UDF_FUNC_NORMAL: + if (pUdfInfo->isScript) { + (*(scriptNormalFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL])(pUdfInfo->pScriptCtx, + (char *)pCtx->pInput + idx * pCtx->inputType, pCtx->inputType, pCtx->inputBytes, pCtx->size, pCtx->ptsList, pCtx->startTs, pCtx->pOutput, + (char *)pCtx->ptsOutputBuf, &output, pCtx->outputType, pCtx->outputBytes); + } else { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + + void *interBuf = (void *)GET_ROWCELL_INTERBUF(pResInfo); + + (*(udfNormalFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL])((char *)pCtx->pInput + idx * pCtx->inputType, pCtx->inputType, pCtx->inputBytes, pCtx->size, pCtx->ptsList, + pCtx->pOutput, interBuf, (char *)pCtx->ptsOutputBuf, &output, pCtx->outputType, pCtx->outputBytes, &pUdfInfo->init); + } + + if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { + pCtx->resultInfo->numOfRes = output; + } else { + pCtx->resultInfo->numOfRes += output; + } + + if (pCtx->resultInfo->numOfRes > 0) { + pCtx->resultInfo->hasResult = DATA_SET_FLAG; + } + + break; + + case TSDB_UDF_FUNC_MERGE: + if (pUdfInfo->isScript) { + (*(scriptMergeFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE])(pUdfInfo->pScriptCtx, pCtx->pInput, pCtx->size, pCtx->pOutput, &output); + } else { + (*(udfMergeFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE])(pCtx->pInput, pCtx->size, pCtx->pOutput, &output, &pUdfInfo->init); + } + + // set the output value exist + pCtx->resultInfo->numOfRes = output; + if (output > 0) { + pCtx->resultInfo->hasResult = DATA_SET_FLAG; + } + + break; + + case TSDB_UDF_FUNC_FINALIZE: { + SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); + void *interBuf = (void *)GET_ROWCELL_INTERBUF(pResInfo); + if (pUdfInfo->isScript) { + (*(scriptFinalizeFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE])(pUdfInfo->pScriptCtx, pCtx->startTs, pCtx->pOutput, &output); + } else { + (*(udfFinalizeFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE])(pCtx->pOutput, interBuf, &output, &pUdfInfo->init); + } + // set the output value exist + pCtx->resultInfo->numOfRes = output; + if (output > 0) { + pCtx->resultInfo->hasResult = DATA_SET_FLAG; + } + + break; + } + } + + return; +} + static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, STimeWindow* pWin, int32_t offset, int32_t forwardStep, TSKEY* tsCol, int32_t numOfTotal, int32_t numOfOutput) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; - bool hasPrev = pCtx[0].preAggVals.isSet; + bool hasAggregates = pCtx[0].preAggVals.isSet; for (int32_t k = 0; k < numOfOutput; ++k) { - pCtx[k].size = forwardStep; + pCtx[k].size = forwardStep; pCtx[k].startTs = pWin->skey; + // keep it temporarialy char* start = pCtx[k].pInput; int32_t pos = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? 
offset : offset - (forwardStep - 1); @@ -725,20 +910,24 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx pCtx[k].ptsList = &tsCol[pos]; } - int32_t functionId = pCtx[k].functionId; - // not a whole block involved in query processing, statistics data can not be used // NOTE: the original value of isSet have been changed here if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) { pCtx[k].preAggVals.isSet = false; } - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { - aAggs[functionId].xFunction(&pCtx[k]); + int32_t functionId = pCtx[k].functionId; + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { + if (functionId < 0) { // load the script and exec, pRuntimeEnv->pUdfInfo + SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; + doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); + } else { + aAggs[functionId].xFunction(&pCtx[k]); + } } // restore it - pCtx[k].preAggVals.isSet = hasPrev; + pCtx[k].preAggVals.isSet = hasAggregates; pCtx[k].pInput = start; } } @@ -847,9 +1036,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in } } -// window start key interpolation - - static void saveDataBlockLastRow(SQueryRuntimeEnv* pRuntimeEnv, SDataBlockInfo* pDataBlockInfo, SArray* pDataBlock, int32_t rowIndex) { if (pDataBlock == NULL) { @@ -880,7 +1066,9 @@ static TSKEY getStartTsKey(SQueryAttr* pQueryAttr, STimeWindow* win, const TSKEY static void setArithParams(SArithmeticSupport* sas, SExprInfo *pExprInfo, SSDataBlock* pSDataBlock) { sas->numOfCols = (int32_t) pSDataBlock->info.numOfCols; sas->pExprInfo = pExprInfo; - + if (sas->colList != NULL) { + return; + } sas->colList = calloc(1, pSDataBlock->info.numOfCols*sizeof(SColumnInfo)); for(int32_t i = 0; i < sas->numOfCols; ++i) { SColumnInfoData* pColData = taosArrayGet(pSDataBlock->pDataBlock, i); @@ -916,7 +1104,7 @@ void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlo doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order); } } else { - if (/*pCtx[0].pInput == NULL && */pBlock->pDataBlock != NULL) { + if (pBlock->pDataBlock != NULL) { doSetInputDataBlock(pOperator, pCtx, pBlock, order); } else { doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order); @@ -945,6 +1133,13 @@ static void doSetInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, pCtx[i].pInput = p->pData; assert(p->info.colId == pColIndex->colId && pCtx[i].inputType == p->info.type); + if (pCtx[i].functionId < 0) { + SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0); + pCtx[i].ptsList = (int64_t*) tsInfo->pData; + + continue; + } + uint32_t status = aAggs[pCtx[i].functionId].status; if ((status & (TSDB_FUNCSTATE_SELECTIVITY | TSDB_FUNCSTATE_NEED_TS)) != 0) { SColumnInfoData* tsInfo = taosArrayGet(pBlock->pDataBlock, 0); @@ -975,15 +1170,21 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { - int32_t functionId = pCtx[k].functionId; - if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) { + if (functionNeedToExecute(pRuntimeEnv, &pCtx[k])) { pCtx[k].startTs = startTs;// this can be set during create the struct - aAggs[functionId].xFunction(&pCtx[k]); + + int32_t functionId = pCtx[k].functionId; + if (functionId < 0) { + SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; + doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); + } else { + aAggs[functionId].xFunction(&pCtx[k]); + } } } } -static void 
arithmeticApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) { +static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; for (int32_t k = 0; k < numOfOutput; ++k) { @@ -994,7 +1195,15 @@ static void arithmeticApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionC pCtx[k].order = TSDB_ORDER_ASC; } - aAggs[pCtx[k].functionId].xFunction(&pCtx[k]); + pCtx[k].startTs = pQueryAttr->window.skey; + + if (pCtx[k].functionId < 0) { + // load the script and exec + SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo; + doInvokeUdf(pUdfInfo, &pCtx[k], 0, TSDB_UDF_FUNC_NORMAL); + } else { + aAggs[pCtx[k].functionId].xFunction(&pCtx[k]); + } } } @@ -1162,7 +1371,7 @@ static void doWindowBorderInterpolation(SOperatorInfo* pOperatorInfo, SSDataBloc } } -static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t groupId) { +static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pSDataBlock, int32_t tableGroupId) { STableIntervalOperatorInfo* pInfo = (STableIntervalOperatorInfo*) pOperatorInfo->info; SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv; @@ -1172,7 +1381,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order); bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr); - int32_t prevIndex = curTimeWindowIndex(pResultRowInfo); + int32_t prevIndex = pResultRowInfo->curPos; TSKEY* tsCols = NULL; if (pSDataBlock->pDataBlock != NULL) { @@ -1189,7 +1398,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); SResultRow* pResult = NULL; - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &win, masterScan, &pResult, groupId, pInfo->pCtx, + int32_t ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -1201,36 +1410,35 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul getNumOfRowsInTimeWindow(pRuntimeEnv, &pSDataBlock->info, tsCols, startPos, ekey, binarySearchForKey, true); // prev time window not interpolation yet. - int32_t curIndex = curTimeWindowIndex(pResultRowInfo); + int32_t curIndex = pResultRowInfo->curPos; if (prevIndex != -1 && prevIndex < curIndex && pQueryAttr->timeWindowInterpo) { for (int32_t j = prevIndex; j < curIndex; ++j) { // previous time window may be all closed already. 
- SResultRow* pRes = pResultRowInfo->pResult[j]; + SResultRow* pRes = getResultRow(pResultRowInfo, j); if (pRes->closed) { - assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && - resultRowInterpolated(pRes, RESULT_ROW_END_INTERP)); + assert(resultRowInterpolated(pRes, RESULT_ROW_START_INTERP) && resultRowInterpolated(pRes, RESULT_ROW_END_INTERP)); continue; } - STimeWindow w = pRes->win; - ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &w, masterScan, &pResult, groupId, pInfo->pCtx, - numOfOutput, pInfo->rowCellInfoOffset); - if (ret != TSDB_CODE_SUCCESS) { - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); - } + STimeWindow w = pRes->win; + ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &w, masterScan, &pResult, + tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); + if (ret != TSDB_CODE_SUCCESS) { + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); + } - assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); + assert(!resultRowInterpolated(pResult, RESULT_ROW_END_INTERP)); - doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY *)pRuntimeEnv->prevRow[0], - -1, tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); + doTimeWindowInterpolation(pOperatorInfo, pInfo, pSDataBlock->pDataBlock, *(TSKEY*)pRuntimeEnv->prevRow[0], -1, + tsCols[startPos], startPos, w.ekey, RESULT_ROW_END_INTERP); - setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); - setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); + setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); + setNotInterpoWindowKey(pInfo->pCtx, pQueryAttr->numOfOutput, RESULT_ROW_START_INTERP); - doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); - } + doApplyFunctions(pRuntimeEnv, pInfo->pCtx, &w, startPos, 0, tsCols, pSDataBlock->info.rows, numOfOutput); + } // restore current time window - ret = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &win, masterScan, &pResult, groupId, pInfo->pCtx, + ret = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &win, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -1250,7 +1458,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul } // null data, failed to allocate more memory buffer - int32_t code = setWindowOutputBufByKey(pRuntimeEnv, pResultRowInfo, &nextWin, masterScan, &pResult, groupId, + int32_t code = setResultOutputBufByKey(pRuntimeEnv, pResultRowInfo, pSDataBlock->info.tid, &nextWin, masterScan, &pResult, tableGroupId, pInfo->pCtx, numOfOutput, pInfo->rowCellInfoOffset); if (code != TSDB_CODE_SUCCESS || pResult == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -1287,6 +1495,12 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn return; } + SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0); + int64_t* tsList = (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? 
(int64_t*) pFirstColData->pData:NULL; + + STimeWindow w = TSWINDOW_INITIALIZER; + + int32_t num = 0; for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) { char* val = ((char*)pColInfoData->pData) + bytes * j; if (isNull(val, type)) { @@ -1294,32 +1508,58 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn } // Compare with the previous row of this column, and do not set the output buffer again if they are identical. - if (pInfo->prevData == NULL || (memcmp(pInfo->prevData, val, bytes) != 0)) { - if (pInfo->prevData == NULL) { - pInfo->prevData = malloc(bytes); - } - + if (pInfo->prevData == NULL) { + pInfo->prevData = malloc(bytes); memcpy(pInfo->prevData, val, bytes); + num++; + continue; + } - if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { - setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); + if (IS_VAR_DATA_TYPE(type)) { + int32_t len = varDataLen(val); + if(len == varDataLen(pInfo->prevData) && memcmp(varDataVal(pInfo->prevData), varDataVal(val), len) == 0) { + num++; + continue; } - - int32_t ret = - setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex); - if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code - longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); + } else { + if (memcmp(pInfo->prevData, val, bytes) == 0) { + num++; + continue; } } - for (int32_t k = 0; k < pOperator->numOfOutput; ++k) { - pInfo->binfo.pCtx[k].size = 1; - int32_t functionId = pInfo->binfo.pCtx[k].functionId; - if (functionNeedToExecute(pRuntimeEnv, &pInfo->binfo.pCtx[k], functionId)) { - aAggs[functionId].xFunctionF(&pInfo->binfo.pCtx[k], j); - } + if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, bytes); } + + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, item->groupIndex); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); + } + + doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, j - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); + + num = 1; + memcpy(pInfo->prevData, val, bytes); } + + if (num > 0) { + char* val = ((char*)pColInfoData->pData) + bytes * (pSDataBlock->info.rows - num); + memcpy(pInfo->prevData, val, bytes); + + if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); + } + + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex); + if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code + longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); + } + + doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); + } + + tfree(pInfo->prevData); } static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInfo *pInfo, SSDataBlock *pSDataBlock) { @@ -1359,7 +1599,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf SResultRow* pResult = NULL; pInfo->curWindow.ekey = 
pInfo->curWindow.skey; - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, + int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, pBInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code @@ -1380,7 +1620,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf SResultRow* pResult = NULL; pInfo->curWindow.ekey = pInfo->curWindow.skey; - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, + int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, pBInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code @@ -1392,9 +1632,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf } static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) { - int64_t v = -1; - GET_TYPED_DATA(v, int64_t, type, pData); - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(type)) { if (pResultRow->key == NULL) { pResultRow->key = malloc(varDataTLen(pData)); varDataCopy(pResultRow->key, pData); @@ -1402,6 +1640,9 @@ static void setResultRowKey(SResultRow* pResultRow, char* pData, int16_t type) { assert(memcmp(pResultRow->key, pData, varDataTLen(pData)) == 0); } } else { + int64_t v = -1; + GET_TYPED_DATA(v, int64_t, type, pData); + pResultRow->win.skey = v; pResultRow->win.ekey = v; } @@ -1417,12 +1658,13 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasic // not assign result buffer yet, add new result buffer, TODO remove it char* d = pData; int16_t len = bytes; - if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + if (IS_VAR_DATA_TYPE(type)) { d = varDataVal(pData); len = varDataLen(pData); } - SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, d, len, true, groupIndex); + int64_t tid = 0; + SResultRow *pResultRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, d, len, true, groupIndex); assert (pResultRow != NULL); setResultRowKey(pResultRow, pData, type); @@ -1459,11 +1701,12 @@ static int32_t getGroupbyColumnIndex(SGroupbyExpr *pGroupbyExpr, SSDataBlock* pD return -1; } -static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { +static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; // in case of timestamp column, always generated results. 
+ int32_t functionId = pCtx->functionId; if (functionId == TSDB_FUNC_TS) { return true; } @@ -1503,7 +1746,7 @@ void setBlockStatisInfo(SQLFunctionCtx *pCtx, SSDataBlock* pSDataBlock, SColInde pCtx->preAggVals.isSet = false; } - pCtx->hasNull = hasNullRv(pColIndex, pStatis); + pCtx->hasNull = hasNull(pColIndex, pStatis); // set the statistics data for primary time stamp column if (pCtx->functionId == TSDB_FUNC_SPREAD && pColIndex->colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { @@ -1651,8 +1894,7 @@ static SQLFunctionCtx* createSQLFunctionCtx(SQueryRuntimeEnv* pRuntimeEnv, SExpr } for(int32_t i = 1; i < numOfOutput; ++i) { - (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + - pExpr[i - 1].base.interBytes * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery)); + (*rowCellInfoOffset)[i] = (int32_t)((*rowCellInfoOffset)[i - 1] + sizeof(SResultRowCellInfo) + pExpr[i - 1].base.interBytes); } setCtxTagColumnInfo(pFuncCtx, numOfOutput); @@ -1686,11 +1928,12 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->pQueryAttr = pQueryAttr; pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); - pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t)); + pRuntimeEnv->pResultRowListSet = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + pRuntimeEnv->keyBuf = malloc(pQueryAttr->maxTableColumnWidth + sizeof(int64_t) + POINTER_BYTES); pRuntimeEnv->pool = initResultRowPool(getResultRowSize(pRuntimeEnv)); + pRuntimeEnv->prevRow = malloc(POINTER_BYTES * pQueryAttr->numOfCols + pQueryAttr->srcRowSize); pRuntimeEnv->tagVal = malloc(pQueryAttr->tagLen); - pRuntimeEnv->currentOffset = pQueryAttr->limit.offset; // NOTE: pTableCheckInfo need to update the query time range and the lastKey info pRuntimeEnv->pTableRetrieveTsMap = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); @@ -1709,7 +1952,9 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf pRuntimeEnv->prevRow[i] = pRuntimeEnv->prevRow[i - 1] + pQueryAttr->tableCols[i-1].bytes; } - *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN; + if (pQueryAttr->tableCols[0].type == TSDB_DATA_TYPE_TIMESTAMP) { + *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN; + } } qDebug("QInfo:0x%"PRIx64" init runtime environment completed", GET_QID(pRuntimeEnv)); @@ -1743,13 +1988,20 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_Groupby: { pRuntimeEnv->proot = createGroupbyOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + + int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; + if (opType != OP_DummyInput) { + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + } break; } case OP_SessionWindow: { pRuntimeEnv->proot = createSWindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; + if (opType != OP_DummyInput) { + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + } 
break; } case OP_MultiTableAggregate: { @@ -1769,23 +2021,26 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf break; } - case OP_Arithmetic: { // TODO refactor to remove arith operator. + case OP_Project: { // TODO refactor to remove arith operator. SOperatorInfo* prev = pRuntimeEnv->proot; if (i == 0) { - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); if (pRuntimeEnv->proot != NULL && prev->operatorType != OP_DummyInput && prev->operatorType != OP_Join) { // TODO refactor setTableScanFilterOperatorInfo(prev->info, pRuntimeEnv->proot); } } else { prev = pRuntimeEnv->proot; assert(pQueryAttr->pExpr2 != NULL); - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); + pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); } break; } case OP_StateWindow: { pRuntimeEnv->proot = createStatewindowOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); - setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + int32_t opType = pRuntimeEnv->proot->upstream[0]->operatorType; + if (opType != OP_DummyInput) { + setTableScanFilterOperatorInfo(pRuntimeEnv->proot->upstream[0]->info, pRuntimeEnv->proot); + } break; } @@ -1835,7 +2090,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf case OP_GlobalAggregate: { pRuntimeEnv->proot = createGlobalAggregateOperatorInfo(pRuntimeEnv, pRuntimeEnv->proot, pQueryAttr->pExpr3, - pQueryAttr->numOfExpr3, merger); + pQueryAttr->numOfExpr3, merger, pQueryAttr->pUdfInfo); break; } @@ -1906,6 +2161,8 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { tfree(pRuntimeEnv->sasArray); } + destroyUdfInfo(pRuntimeEnv->pUdfInfo); + destroyResultBuf(pRuntimeEnv->pResultBuf); doFreeQueryHandle(pRuntimeEnv); @@ -1923,6 +2180,9 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { taosHashCleanup(pRuntimeEnv->pTableRetrieveTsMap); pRuntimeEnv->pTableRetrieveTsMap = NULL; + taosHashCleanup(pRuntimeEnv->pResultRowListSet); + pRuntimeEnv->pResultRowListSet = NULL; + destroyOperatorInfo(pRuntimeEnv->proot); pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); @@ -2113,7 +2373,7 @@ static bool onlyFirstQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQu static bool onlyLastQuery(SQueryAttr *pQueryAttr) { return onlyOneQueryType(pQueryAttr, TSDB_FUNC_LAST, TSDB_FUNC_LAST_DST); } -static bool notContainSessionOrStateWindow(SQueryAttr *pQueryAttr) { return !(pQueryAttr->sw.gap > 0 || pQueryAttr->stateWindow); } +static bool notContainSessionOrStateWindow(SQueryAttr *pQueryAttr) { return !(pQueryAttr->sw.gap > 0 || pQueryAttr->stateWindow); } static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { bool hasFirstLastFunc = false; @@ -2149,10 +2409,11 @@ static int32_t updateBlockLoadStatus(SQueryAttr *pQuery, int32_t status) { return status; } -static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { - SQueryAttr* pQueryAttr = &pQInfo->query; - size_t t = taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList); - for(int32_t i = 0; i < t; ++i) { +static void doUpdateLastKey(SQueryAttr* pQueryAttr) { + STimeWindow* win = &pQueryAttr->window; + + size_t num = 
taosArrayGetSize(pQueryAttr->tableGroupInfo.pGroupList); + for(int32_t i = 0; i < num; ++i) { SArray* p1 = taosArrayGetP(pQueryAttr->tableGroupInfo.pGroupList, i); size_t len = taosArrayGetSize(p1); @@ -2167,7 +2428,7 @@ static void doExchangeTimeWindow(SQInfo* pQInfo, STimeWindow* win) { } } -static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) { +static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bool stableQuery) { SQueryAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; // in case of point-interpolation query, use asc order scan @@ -2184,6 +2445,7 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } + pQueryAttr->needReverseScan = false; return; } @@ -2193,13 +2455,14 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + pQueryAttr->needReverseScan = false; + doUpdateLastKey(pQueryAttr); return; } if (pQueryAttr->pointInterpQuery && pQueryAttr->interval.interval == 0) { if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { - qDebug(msg, pQInfo, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); + qDebug(msg, pQInfo->qId, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); } @@ -2210,48 +2473,52 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo if (pQueryAttr->interval.interval == 0) { if (onlyFirstQuery(pQueryAttr)) { if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { - qDebug(msg, pQInfo, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, + qDebug(msg, pQInfo->qId, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_ASC; + pQueryAttr->needReverseScan = false; } else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) { if (QUERY_IS_ASC_QUERY(pQueryAttr)) { - qDebug(msg, pQInfo, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, + qDebug(msg, pQInfo->qId, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_DESC; + pQueryAttr->needReverseScan = false; } } else { // interval query if (stableQuery) { if (onlyFirstQuery(pQueryAttr)) { if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { - qDebug(msg, pQInfo, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC, + qDebug(msg, pQInfo->qId, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + 
doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_ASC; + pQueryAttr->needReverseScan = false; } else if (onlyLastQuery(pQueryAttr)) { if (QUERY_IS_ASC_QUERY(pQueryAttr)) { - qDebug(msg, pQInfo, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC, + qDebug(msg, pQInfo->qId, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); SWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey, TSKEY); - doExchangeTimeWindow(pQInfo, &pQueryAttr->window); + doUpdateLastKey(pQueryAttr); } pQueryAttr->order.order = TSDB_ORDER_DESC; + pQueryAttr->needReverseScan = false; } } } @@ -2269,9 +2536,6 @@ static void getIntermediateBufInfo(SQueryRuntimeEnv* pRuntimeEnv, int32_t* ps, i while(((*rowsize) * MIN_ROWS_PER_PAGE) > (*ps) - overhead) { *ps = ((*ps) << 1u); } - -// pRuntimeEnv->numOfRowsPerPage = ((*ps) - sizeof(tFilePage)) / (*rowsize); -// assert(pRuntimeEnv->numOfRowsPerPage <= MAX_ROWS_PER_RESBUF_PAGE); } #define IS_PREFILTER_TYPE(_t) ((_t) != TSDB_DATA_TYPE_BINARY && (_t) != TSDB_DATA_TYPE_NCHAR) @@ -2319,14 +2583,14 @@ static bool doFilterByBlockStatistics(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis } SDataStatis* pDataBlockst = &pDataStatis[index]; - + if (pFilterInfo->info.type == TSDB_DATA_TYPE_FLOAT) { float minval = (float)(*(double *)(&pDataBlockst->min)); float maxval = (float)(*(double *)(&pDataBlockst->max)); - + for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) { if (pFilterInfo->pFilters[i].filterInfo.lowerRelOptr == TSDB_RELATION_IN) { - continue; + continue; } ret &= pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&minval, (char *)&maxval, TSDB_DATA_TYPE_FLOAT); if (ret == false) { @@ -2336,9 +2600,9 @@ static bool doFilterByBlockStatistics(SQueryRuntimeEnv* pRuntimeEnv, SDataStatis } else { for (int32_t i = 0; i < pFilterInfo->numOfFilters; ++i) { if (pFilterInfo->pFilters[i].filterInfo.lowerRelOptr == TSDB_RELATION_IN) { - continue; + continue; } - ret &= pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&pDataBlockst->min, (char *)&pDataBlockst->max, pFilterInfo->info.type); + ret &= pFilterInfo->pFilters[i].fp(&pFilterInfo->pFilters[i], (char *)&pDataBlockst->min, (char *)&pDataBlockst->max, pFilterInfo->info.type); if (ret == false) { return false; } @@ -2578,9 +2842,14 @@ static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSData int32_t colId = pTableScanInfo->pExpr[i].base.colInfo.colId; // group by + first/last should not apply the first/last block filter - status |= aAggs[functionId].dataReqFunc(&pTableScanInfo->pCtx[i], &pBlock->info.window, colId); - if ((status & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) { + if (functionId < 0) { + status |= BLK_DATA_ALL_NEEDED; return status; + } else { + status |= aAggs[functionId].dataReqFunc(&pTableScanInfo->pCtx[i], &pBlock->info.window, colId); + if ((status & BLK_DATA_ALL_NEEDED) == BLK_DATA_ALL_NEEDED) { + return status; + } } } @@ -2588,10 +2857,6 @@ static uint32_t doFilterByBlockTimeWindow(STableScanInfo* pTableScanInfo, SSData } void doSetFilterColumnInfo(SSingleColumnFilterInfo* pFilterInfo, int32_t numOfFilterCols, SSDataBlock* pBlock) { - if (numOfFilterCols > 0 && pFilterInfo[0].pData != NULL) { - return; - } - // set the initial static data value filter expression for (int32_t i = 0; i < numOfFilterCols; ++i) { for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) { @@ -2657,7 +2922,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* 
pRuntimeEnv, STableScanInfo* pTa TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey; STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr); - if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId, + if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -2703,7 +2968,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa TSKEY k = ascQuery? pBlock->info.window.skey : pBlock->info.window.ekey; STimeWindow win = getActiveTimeWindow(pTableScanInfo->pResultRowInfo, k, pQueryAttr); - if (setWindowOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, &win, masterScan, &pResult, groupId, + if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.tid, &win, masterScan, &pResult, groupId, pTableScanInfo->pCtx, pTableScanInfo->numOfOutput, pTableScanInfo->rowCellInfoOffset) != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); @@ -2889,10 +3154,14 @@ void setTagValue(SOperatorInfo* pOperatorInfo, void *pTable, SQLFunctionCtx* pCt doSetTagValueInParam(pTable, pLocalExprInfo->base.colInfo.colId, &pCtx[idx].tag, pLocalExprInfo->base.resType, pLocalExprInfo->base.resBytes); - if (IS_NUMERIC_TYPE(pLocalExprInfo->base.resType) || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_BOOL) { + if (IS_NUMERIC_TYPE(pLocalExprInfo->base.resType) + || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_BOOL + || pLocalExprInfo->base.resType == TSDB_DATA_TYPE_TIMESTAMP) { memcpy(pRuntimeEnv->tagVal + offset, &pCtx[idx].tag.i64, pLocalExprInfo->base.resBytes); } else { - memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen); + if (pCtx[idx].tag.pz != NULL) { + memcpy(pRuntimeEnv->tagVal + offset, pCtx[idx].tag.pz, pCtx[idx].tag.nLen); + } } offset += pLocalExprInfo->base.resBytes; @@ -3045,7 +3314,7 @@ void copyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, int32_t threshold, SSDataBl } -static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQueryInfo *pTableQueryInfo) { +static void updateTableQueryInfoForReverseScan(STableQueryInfo *pTableQueryInfo) { if (pTableQueryInfo == NULL) { return; } @@ -3057,7 +3326,12 @@ static void updateTableQueryInfoForReverseScan(SQueryAttr *pQueryAttr, STableQue pTableQueryInfo->cur.vgroupIndex = -1; // set the index to be the end slot of result rows array - pTableQueryInfo->resInfo.curIndex = pTableQueryInfo->resInfo.size - 1; + SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo; + if (pResultRowInfo->size > 0) { + pResultRowInfo->curPos = pResultRowInfo->size - 1; + } else { + pResultRowInfo->curPos = -1; + } } static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) { @@ -3071,7 +3345,7 @@ static void setupQueryRangeForReverseScan(SQueryRuntimeEnv* pRuntimeEnv) { size_t t = taosArrayGetSize(group); for (int32_t j = 0; j < t; ++j) { STableQueryInfo *pCheckInfo = taosArrayGetP(group, j); - updateTableQueryInfoForReverseScan(pQueryAttr, pCheckInfo); + updateTableQueryInfoForReverseScan(pCheckInfo); // update the last key in tableKeyInfo list, the tableKeyInfo is used to build the tsdbQueryHandle and decide // the start check timestamp of tsdbQueryHandle @@ -3110,8 +3384,8 @@ void 
setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, i int32_t* rowCellInfoOffset = pInfo->rowCellInfoOffset; SResultRowInfo* pResultRowInfo = &pInfo->resultRowInfo; - int32_t tid = 0; - SResultRow* pRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&tid, sizeof(tid), true, uid); + int64_t tid = 0; + SResultRow* pRow = doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char *)&tid, sizeof(tid), true, uid); for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { SColumnInfoData* pData = taosArrayGet(pDataBlock->pDataBlock, i); @@ -3171,6 +3445,21 @@ void updateOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity, int32_t numOf } } +void clearOutputBuf(SOptrBasicInfo* pBInfo, int32_t *bufCapacity) { + SSDataBlock* pDataBlock = pBInfo->pRes; + + for (int32_t i = 0; i < pDataBlock->info.numOfCols; ++i) { + SColumnInfoData *pColInfo = taosArrayGet(pDataBlock->pDataBlock, i); + + int32_t functionId = pBInfo->pCtx[i].functionId; + if (functionId < 0) { + memset(pBInfo->pCtx[i].pOutput, 0, pColInfo->info.bytes * (*bufCapacity)); + } + } +} + + + void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) { for (int32_t j = 0; j < size; ++j) { SResultRowCellInfo* pResInfo = GET_RES_INFO(&pCtx[j]); @@ -3178,7 +3467,11 @@ void initCtxOutputBuffer(SQLFunctionCtx* pCtx, int32_t size) { continue; } - aAggs[pCtx[j].functionId].init(&pCtx[j]); + if (pCtx[j].functionId < 0) { // todo udf initialization + continue; + } else { + aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo); + } } } @@ -3233,9 +3526,15 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult setResultOutputBuf(pRuntimeEnv, buf, pCtx, numOfOutput, rowCellInfoOffset); for (int32_t j = 0; j < numOfOutput; ++j) { - aAggs[pCtx[j].functionId].xFinalize(&pCtx[j]); + pCtx[j].startTs = buf->win.skey; + if (pCtx[j].functionId < 0) { + doInvokeUdf(pRuntimeEnv->pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); + } else { + aAggs[pCtx[j].functionId].xFinalize(&pCtx[j]); + } } + /* * set the number of output results for group by normal columns, the number of output rows usually is 1 except * the top and bottom query @@ -3245,7 +3544,11 @@ void finalizeQueryResult(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SResult } else { for (int32_t j = 0; j < numOfOutput; ++j) { - aAggs[pCtx[j].functionId].xFinalize(&pCtx[j]); + if (pCtx[j].functionId < 0) { + doInvokeUdf(pRuntimeEnv->pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE); + } else { + aAggs[pCtx[j].functionId].xFinalize(&pCtx[j]); + } } } } @@ -3317,7 +3620,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe // Note: pResult->pos[i]->num == 0, there is only fixed number of results for each group tFilePage* bufPage = getResBufPage(pRuntimeEnv->pResultBuf, pResult->pageId); - int16_t offset = 0; + int32_t offset = 0; for (int32_t i = 0; i < numOfOutput; ++i) { pCtx[i].resultInfo = getResultCell(pResult, i, rowCellInfoOffset); @@ -3331,21 +3634,28 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe offset += pCtx[i].outputBytes; int32_t functionId = pCtx[i].functionId; - if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) { + if (functionId < 0) { + continue; + } + + if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) { pCtx[i].ptsOutputBuf = pCtx[0].pOutput; } if (!pResInfo->initialized) { - 
aAggs[functionId].init(&pCtx[i]); + aAggs[functionId].init(&pCtx[i], pResInfo); } } } void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, SQLFunctionCtx* pCtx, - int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex) { + int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t tableGroupId) { + // for simple group by query without interval, all the tables belong to one group result. int64_t uid = 0; + int64_t tid = 0; + SResultRow* pResultRow = - doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char*)&groupIndex, sizeof(groupIndex), true, uid); + doSetResultOutBufByKey(pRuntimeEnv, pResultRowInfo, tid, (char*)&tableGroupId, sizeof(tableGroupId), true, uid); assert (pResultRow != NULL); /* @@ -3353,7 +3663,7 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe * all group belong to one result set, and each group result has different group id so set the id to be one */ if (pResultRow->pageId == -1) { - int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, groupIndex, pRuntimeEnv->pQueryAttr->resultRowSize); + int32_t ret = addNewWindowResultBuf(pResultRow, pRuntimeEnv->pResultBuf, tableGroupId, pRuntimeEnv->pQueryAttr->resultRowSize); if (ret != TSDB_CODE_SUCCESS) { return; } @@ -3362,20 +3672,20 @@ void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pRe setResultRowOutputBufInitCtx(pRuntimeEnv, pResultRow, pCtx, numOfOutput, rowCellInfoOffset); } -void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t groupIndex, +void setExecutionContext(SQueryRuntimeEnv* pRuntimeEnv, SOptrBasicInfo* pInfo, int32_t numOfOutput, int32_t tableGroupId, TSKEY nextKey) { STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current; // lastKey needs to be updated pTableQueryInfo->lastKey = nextKey; - if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == groupIndex) { + if (pRuntimeEnv->prevGroupId != INT32_MIN && pRuntimeEnv->prevGroupId == tableGroupId) { return; } - doSetTableGroupOutputBuf(pRuntimeEnv, &pInfo->resultRowInfo, pInfo->pCtx, pInfo->rowCellInfoOffset, numOfOutput, groupIndex); + doSetTableGroupOutputBuf(pRuntimeEnv, &pInfo->resultRowInfo, pInfo->pCtx, pInfo->rowCellInfoOffset, numOfOutput, tableGroupId); // record the current active group id - pRuntimeEnv->prevGroupId = groupIndex; + pRuntimeEnv->prevGroupId = tableGroupId; } void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLFunctionCtx* pCtx, @@ -3468,6 +3778,7 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, return 0; } +// TODO refactor: this funciton should be merged with setparamForStableStddevColumnData function. 
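Several hunks above (initCtxOutputBuffer, finalizeQueryResult, setResultRowOutputBufInitCtx) gate the built-in `aAggs` dispatch on `functionId < 0`, routing negative ids to the new UDF machinery instead of the built-in aggregate table. The standalone sketch below only illustrates that convention; the names used here (SFuncCtxSketch, sketchAggs, finalizeOne) are simplified stand-ins for this example, not the engine's real types.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the engine structures -- illustration only. */
typedef struct SFuncCtxSketch {
  int32_t functionId;   /* >= 0: index into the built-in aggregate table; < 0: user-defined function */
  double  result;
} SFuncCtxSketch;

typedef void (*finalizeFn)(SFuncCtxSketch *pCtx);

static void builtinFinalize(SFuncCtxSketch *pCtx) { printf("built-in finalize -> %f\n", pCtx->result); }
static void udfFinalize(SFuncCtxSketch *pCtx)     { printf("udf finalize      -> %f\n", pCtx->result); }

/* Stand-in for the aAggs[] table of built-in aggregate handlers. */
static finalizeFn sketchAggs[] = { builtinFinalize };

/* Mirrors the dispatch added in finalizeQueryResult(): negative ids take the UDF path
 * (doInvokeUdf with TSDB_UDF_FUNC_FINALIZE), non-negative ids use the built-in table. */
static void finalizeOne(SFuncCtxSketch *pCtx) {
  if (pCtx->functionId < 0) {
    udfFinalize(pCtx);
  } else {
    sketchAggs[pCtx->functionId](pCtx);
  }
}

int main(void) {
  SFuncCtxSketch cols[2] = { { .functionId = 0, .result = 1.5 }, { .functionId = -1, .result = 2.5 } };
  for (int i = 0; i < 2; ++i) {
    finalizeOne(&cols[i]);
  }
  return 0;
}
```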
void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExprInfo) { SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; @@ -3545,9 +3856,9 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; STableQueryInfo *pTableQueryInfo = pRuntimeEnv->current; - SResultRowInfo *pWindowResInfo = &pTableQueryInfo->resInfo; + SResultRowInfo *pResultRowInfo = &pTableQueryInfo->resInfo; - if (pWindowResInfo->prevSKey != TSKEY_INITIAL_VAL) { + if (pResultRowInfo->curPos != -1) { return; } @@ -3566,13 +3877,13 @@ void setIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, TSKEY key) { TSKEY ek = MAX(win.skey, win.ekey); getAlignQueryTimeWindow(pQueryAttr, win.skey, sk, ek, &w); - if (pWindowResInfo->prevSKey == TSKEY_INITIAL_VAL) { - if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { - assert(win.ekey == pQueryAttr->window.ekey); - } - - pWindowResInfo->prevSKey = w.skey; - } +// if (pResultRowInfo->prevSKey == TSKEY_INITIAL_VAL) { +// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) { +// assert(win.ekey == pQueryAttr->window.ekey); +// } +// +// pResultRowInfo->prevSKey = w.skey; +// } pTableQueryInfo->lastKey = pTableQueryInfo->win.skey; } @@ -3615,12 +3926,15 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* } int32_t numOfRowsToCopy = pRow->numOfRows; + if (numOfResult + numOfRowsToCopy >= pRuntimeEnv->resultInfo.capacity) { + break; + } pGroupResInfo->index += 1; tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pRow->pageId); - int16_t offset = 0; + int32_t offset = 0; for (int32_t j = 0; j < pBlock->info.numOfCols; ++j) { SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, j); int32_t bytes = pColInfoData->info.bytes; @@ -3657,8 +3971,8 @@ static void toSSDataBlock(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv* pRunti // refactor : extract method SColumnInfoData* pInfoData = taosArrayGet(pBlock->pDataBlock, 0); - - if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { + //add condition (pBlock->info.rows >= 1) just to runtime happy + if (pInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP && pBlock->info.rows >= 1) { STimeWindow* w = &pBlock->info.window; w->skey = *(int64_t*)pInfoData->pData; w->ekey = *(int64_t*)(((char*)pInfoData->pData) + TSDB_KEYSIZE * (pBlock->info.rows - 1)); @@ -3748,6 +4062,106 @@ int32_t doFillTimeIntervalGapsInResults(SFillInfo* pFillInfo, SSDataBlock *pOutp return pOutput->info.rows; } +void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType eventType) { + SQueryProfEvent event = {0}; + + event.eventType = eventType; + event.eventTime = taosGetTimestampUs(); + event.operatorType = operatorInfo->operatorType; + + if (operatorInfo->pRuntimeEnv) { + SQInfo* pQInfo = operatorInfo->pRuntimeEnv->qinfo; + if (pQInfo->summary.queryProfEvents) { + taosArrayPush(pQInfo->summary.queryProfEvents, &event); + } + } +} + +void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code) { + SQueryProfEvent event; + event.eventType = QUERY_PROF_QUERY_ABORT; + event.eventTime = taosGetTimestampUs(); + event.abortCode = code; + + if (pQInfo->summary.queryProfEvents) { + taosArrayPush(pQInfo->summary.queryProfEvents, &event); + } +} + +typedef struct { + uint8_t operatorType; + int64_t beginTime; + int64_t endTime; + int64_t selfTime; + int64_t descendantsTime; +} SOperatorStackItem; + +static void doOperatorExecProfOnce(SOperatorStackItem* 
item, SQueryProfEvent* event, SArray* opStack, SHashObj* profResults) { + item->endTime = event->eventTime; + item->selfTime = (item->endTime - item->beginTime) - (item->descendantsTime); + + for (int32_t j = 0; j < taosArrayGetSize(opStack); ++j) { + SOperatorStackItem* ancestor = taosArrayGet(opStack, j); + ancestor->descendantsTime += item->selfTime; + } + + uint8_t operatorType = item->operatorType; + SOperatorProfResult* result = taosHashGet(profResults, &operatorType, sizeof(operatorType)); + if (result != NULL) { + result->sumRunTimes++; + result->sumSelfTime += item->selfTime; + } else { + SOperatorProfResult opResult; + opResult.operatorType = operatorType; + opResult.sumSelfTime = item->selfTime; + opResult.sumRunTimes = 1; + taosHashPut(profResults, &(operatorType), sizeof(operatorType), + &opResult, sizeof(opResult)); + } +} + +void calculateOperatorProfResults(SQInfo* pQInfo) { + if (pQInfo->summary.queryProfEvents == NULL) { + qDebug("QInfo:0x%"PRIx64" query prof events array is null", pQInfo->qId); + return; + } + + if (pQInfo->summary.operatorProfResults == NULL) { + qDebug("QInfo:0x%"PRIx64" operator prof results hash is null", pQInfo->qId); + return; + } + + SArray* opStack = taosArrayInit(32, sizeof(SOperatorStackItem)); + if (opStack == NULL) { + return; + } + + size_t size = taosArrayGetSize(pQInfo->summary.queryProfEvents); + SHashObj* profResults = pQInfo->summary.operatorProfResults; + + for (int i = 0; i < size; ++i) { + SQueryProfEvent* event = taosArrayGet(pQInfo->summary.queryProfEvents, i); + if (event->eventType == QUERY_PROF_BEFORE_OPERATOR_EXEC) { + SOperatorStackItem opItem; + opItem.operatorType = event->operatorType; + opItem.beginTime = event->eventTime; + opItem.descendantsTime = 0; + taosArrayPush(opStack, &opItem); + } else if (event->eventType == QUERY_PROF_AFTER_OPERATOR_EXEC) { + SOperatorStackItem* item = taosArrayPop(opStack); + assert(item->operatorType == event->operatorType); + doOperatorExecProfOnce(item, event, opStack, profResults); + } else if (event->eventType == QUERY_PROF_QUERY_ABORT) { + SOperatorStackItem* item; + while ((item = taosArrayPop(opStack)) != NULL) { + doOperatorExecProfOnce(item, event, opStack, profResults); + } + } + } + + taosArrayDestroy(opStack); +} + void queryCostStatis(SQInfo *pQInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryCostInfo *pSummary = &pQInfo->summary; @@ -3768,6 +4182,8 @@ void queryCostStatis(SQInfo *pQInfo) { pSummary->numOfTimeWindows = 0; } + calculateOperatorProfResults(pQInfo); + qDebug("QInfo:0x%"PRIx64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " "load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, pQInfo->qId, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis, @@ -3775,6 +4191,15 @@ void queryCostStatis(SQInfo *pQInfo) { qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0, pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0); + + if (pSummary->operatorProfResults) { + SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL); + while (opRes != NULL) { + qDebug("QInfo:0x%" PRIx64 " :cost summary: operator : %d, exec times: %" PRId64 ", self time: %" PRId64, + pQInfo->qId, opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime); + opRes = 
taosHashIterate(pSummary->operatorProfResults, opRes); + } + } } //static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { @@ -4108,8 +4533,8 @@ static SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, in return pFillCol; } -int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr, int32_t tbScanner, - SArray* pOperator, void* param) { +int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr, int32_t tbScanner, SArray* pOperator, + void* param) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; @@ -4142,7 +4567,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr break; } case OP_DataBlocksOptScan: { - pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), 1); + pRuntimeEnv->proot = createDataBlocksOptScanInfo(pRuntimeEnv->pQueryHandle, pRuntimeEnv, getNumOfScanTimes(pQueryAttr), pQueryAttr->needReverseScan? 1:0); break; } case OP_TableScan: { @@ -4176,6 +4601,17 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr // create runtime environment int32_t numOfTables = (int32_t)pQueryAttr->tableGroupInfo.numOfTables; pQInfo->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo)); + pQInfo->summary.queryProfEvents = taosArrayInit(512, sizeof(SQueryProfEvent)); + if (pQInfo->summary.queryProfEvents == NULL) { + qDebug("QInfo:0x%"PRIx64" failed to allocate query prof events array", pQInfo->qId); + } + + pQInfo->summary.operatorProfResults = + taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK); + + if (pQInfo->summary.operatorProfResults == NULL) { + qDebug("QInfo:0x%"PRIx64" failed to allocate operator prof results hash", pQInfo->qId); + } code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQueryAttr->tableGroupInfo.numOfTables, pOperator, param); if (code != TSDB_CODE_SUCCESS) { @@ -4349,8 +4785,7 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) { } if (pResultRowInfo->size > 0) { - pResultRowInfo->curIndex = 0; - pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey; + pResultRowInfo->curPos = 0; } qDebug("QInfo:0x%"PRIx64" start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, @@ -4375,8 +4810,7 @@ static SSDataBlock* doTableScan(void* param, bool *newgroup) { pTableScanInfo->order = cond.order; if (pResultRowInfo->size > 0) { - pResultRowInfo->curIndex = pResultRowInfo->size-1; - pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->size-1]->win.skey; + pResultRowInfo->curPos = pResultRowInfo->size - 1; } p = doTableScanImpl(pOperator, newgroup); @@ -4535,8 +4969,8 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf pTableScanInfo->pResultRowInfo = &pInfo->resultRowInfo; pTableScanInfo->rowCellInfoOffset = pInfo->rowCellInfoOffset; - } else if (pDownstream->operatorType == OP_Arithmetic) { - SArithOperatorInfo *pInfo = pDownstream->info; + } else if (pDownstream->operatorType == OP_Project) { + SProjectOperatorInfo *pInfo = pDownstream->info; pTableScanInfo->pCtx = pInfo->binfo.pCtx; pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo; @@ -4548,7 +4982,7 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo; pTableScanInfo->rowCellInfoOffset = 
pInfo->binfo.rowCellInfoOffset; } else if (pDownstream->operatorType == OP_StateWindow) { - SStateWindowOperatorInfo* pInfo = pDownstream->info; + SStateWindowOperatorInfo* pInfo = pDownstream->info; pTableScanInfo->pCtx = pInfo->binfo.pCtx; pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo; @@ -4567,7 +5001,6 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime pInfo->reverseTimes = reverseTime; pInfo->current = 0; pInfo->order = pRuntimeEnv->pQueryAttr->order.order; -// pInfo->prevGroupId = -1; SOperatorInfo* pOptr = calloc(1, sizeof(SOperatorInfo)); pOptr->name = "DataBlocksOptimizedScanOperator"; @@ -4669,17 +5102,17 @@ static void destroySlimitOperatorInfo(void* param, int32_t numOfOutput) { } SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, - SExprInfo* pExpr, int32_t numOfOutput, void* param) { + SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo) { SMultiwayMergeInfo* pInfo = calloc(1, sizeof(SMultiwayMergeInfo)); pInfo->resultRowFactor = - (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, - false)); + (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pRuntimeEnv->pQueryAttr, pRuntimeEnv->pQueryAttr->topBotQuery, false)); pRuntimeEnv->scanFlag = MERGE_STAGE; // TODO init when creating pCtx pInfo->pMerge = param; pInfo->bufCapacity = 4096; + pInfo->udfInfo = pUdfInfo; pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pInfo->bufCapacity * pInfo->resultRowFactor); pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); @@ -4801,7 +5234,10 @@ static SSDataBlock* doAggregate(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; while(1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { break; } @@ -4856,7 +5292,10 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; while(1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { break; } @@ -4871,7 +5310,15 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - TSKEY key = QUERY_IS_ASC_QUERY(pQueryAttr)? 
pBlock->info.window.ekey + 1:pBlock->info.window.skey-1; + TSKEY key = 0; + if (QUERY_IS_ASC_QUERY(pQueryAttr)) { + key = pBlock->info.window.ekey; + TSKEY_MAX_ADD(key, 1); + } else { + key = pBlock->info.window.skey; + TSKEY_MIN_SUB(key, -1); + } + setExecutionContext(pRuntimeEnv, pInfo, pOperator->numOfOutput, pRuntimeEnv->current->groupIndex, key); doAggregateImpl(pOperator, pQueryAttr->window.skey, pInfo->pCtx, pBlock); } @@ -4892,23 +5339,23 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) { return pInfo->pRes; } -static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { +static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; - SArithOperatorInfo* pArithInfo = pOperator->info; + SProjectOperatorInfo* pProjectInfo = pOperator->info; SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; - SOptrBasicInfo *pInfo = &pArithInfo->binfo; + SOptrBasicInfo *pInfo = &pProjectInfo->binfo; SSDataBlock* pRes = pInfo->pRes; int32_t order = pRuntimeEnv->pQueryAttr->order.order; pRes->info.rows = 0; - if (pArithInfo->existDataBlock) { // TODO refactor + if (pProjectInfo->existDataBlock) { // TODO refactor STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; - SSDataBlock* pBlock = pArithInfo->existDataBlock; - pArithInfo->existDataBlock = NULL; + SSDataBlock* pBlock = pProjectInfo->existDataBlock; + pProjectInfo->existDataBlock = NULL; *newgroup = true; // todo dynamic set tags @@ -4918,9 +5365,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); + updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows); - arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); + projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); if (pTableQueryInfo != NULL) { updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); } @@ -4936,7 +5383,10 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { bool prevVal = *newgroup; // The upstream exec may change the value of the newgroup, so use a local variable instead. + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { assert(*newgroup == false); @@ -4948,7 +5398,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { // Return result of the previous group in the firstly. 
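The doSTableAggregate hunk above replaces the bare `ekey + 1` / `skey - 1` arithmetic with TSKEY_MAX_ADD / TSKEY_MIN_SUB, whose definitions are not part of this diff. The helpers below are a hypothetical sketch of the overflow-clamping idea only; the actual macros in the tree may be implemented differently.

```c
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* Hypothetical clamped adjustments: move a window boundary by one tick without
 * wrapping past the int64_t range. Illustration of the intent behind
 * TSKEY_MAX_ADD / TSKEY_MIN_SUB, not the patch's actual definitions. */
static TSKEY tskeyAddClamped(TSKEY k, int64_t delta) {
  if (delta > 0 && k > INT64_MAX - delta) {
    return INT64_MAX;   /* would overflow upwards: saturate */
  }
  return k + delta;
}

static TSKEY tskeySubClamped(TSKEY k, int64_t delta) {
  if (delta > 0 && k < INT64_MIN + delta) {
    return INT64_MIN;   /* would overflow downwards: saturate */
  }
  return k - delta;
}

int main(void) {
  printf("%lld\n", (long long)tskeyAddClamped(INT64_MAX, 1));   /* stays INT64_MAX, no wrap-around */
  printf("%lld\n", (long long)tskeySubClamped(INT64_MIN, 1));   /* stays INT64_MIN, no wrap-around */
  printf("%lld\n", (long long)tskeyAddClamped(100, 1));         /* prints 101 */
  return 0;
}
```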
if (*newgroup) { if (pRes->info.rows > 0) { - pArithInfo->existDataBlock = pBlock; + pProjectInfo->existDataBlock = pBlock; clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); return pInfo->pRes; } else { // init output buffer for a new group data @@ -4968,9 +5418,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); + updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows); - arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); + projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); if (pTableQueryInfo != NULL) { updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); } @@ -4996,7 +5446,10 @@ static SSDataBlock* doLimit(void* param, bool* newgroup) { SSDataBlock* pBlock = NULL; while (1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); pOperator->status = OP_EXEC_DONE; @@ -5046,7 +5499,10 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) { SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; while (1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { break; } @@ -5091,7 +5547,10 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; while(1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { break; } @@ -5144,7 +5603,10 @@ static SSDataBlock* doSTableIntervalAgg(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; while(1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { break; } @@ -5188,7 +5650,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI TSKEY* tsList = (TSKEY*)pTsColInfoData->pData; if (IS_REPEAT_SCAN(pRuntimeEnv) && !pInfo->reptScan) { pInfo->reptScan = true; - tfree(pInfo->prevData); + tfree(pInfo->prevData); } pInfo->numOfRows = 0; @@ -5198,17 +5660,17 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI continue; } if (pInfo->prevData == NULL) { - pInfo->prevData = malloc(bytes); + pInfo->prevData = malloc(bytes); memcpy(pInfo->prevData, val, bytes); pInfo->numOfRows = 1; pInfo->curWindow.skey = tsList[j]; pInfo->curWindow.ekey = tsList[j]; - pInfo->start = j; - + pInfo->start = j; + } else if (memcmp(pInfo->prevData, val, bytes) == 0) { pInfo->curWindow.ekey = tsList[j]; pInfo->numOfRows += 1; - //pInfo->start = j; + //pInfo->start = j; if (j == 0 && pInfo->start != 0) { pInfo->numOfRows = 1; pInfo->start = 0; @@ -5216,7 +5678,7 @@ 
static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI } else { SResultRow* pResult = NULL; pInfo->curWindow.ekey = pInfo->curWindow.skey; - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, + int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, pBInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code @@ -5230,13 +5692,14 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI memcpy(pInfo->prevData, val, bytes); pInfo->numOfRows = 1; pInfo->start = j; - + } } + SResultRow* pResult = NULL; pInfo->curWindow.ekey = pInfo->curWindow.skey; - int32_t ret = setWindowOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, &pInfo->curWindow, masterScan, + int32_t ret = setResultOutputBufByKey(pRuntimeEnv, &pBInfo->resultRowInfo, pSDataBlock->info.tid, &pInfo->curWindow, masterScan, &pResult, item->groupIndex, pBInfo->pCtx, pOperator->numOfOutput, pBInfo->rowCellInfoOffset); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code @@ -5246,6 +5709,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI doApplyFunctions(pRuntimeEnv, pBInfo->pCtx, &pInfo->curWindow, pInfo->start, pInfo->numOfRows, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); } + static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { @@ -5271,17 +5735,20 @@ static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) { STimeWindow win = pQueryAttr->window; SOperatorInfo* upstream = pOperator->upstream[0]; while (1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { break; } setInputDataBlock(pOperator, pBInfo->pCtx, pBlock, pQueryAttr->order.order); if (pWindowInfo->colIndex == -1) { pWindowInfo->colIndex = getGroupbyColumnIndex(pRuntimeEnv->pQueryAttr->pGroupbyExpr, pBlock); - } + } doStateWindowAggImpl(pOperator, pWindowInfo, pBlock); } - + // restore the value pQueryAttr->order.order = order; pQueryAttr->window = win; @@ -5299,7 +5766,7 @@ static SSDataBlock* doStateWindowAgg(void *param, bool* newgroup) { } return pBInfo->pRes->info.rows == 0? 
NULL:pBInfo->pRes; -} +} static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; if (pOperator->status == OP_EXEC_DONE) { @@ -5329,7 +5796,9 @@ static SSDataBlock* doSessionWindowAgg(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; while(1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } @@ -5380,7 +5849,9 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { SOperatorInfo* upstream = pOperator->upstream[0]; while(1) { + publishOperatorProfEvent(upstream, QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = upstream->exec(upstream, newgroup); + publishOperatorProfEvent(upstream, QUERY_PROF_AFTER_OPERATOR_EXEC); if (pBlock == NULL) { break; } @@ -5406,6 +5877,9 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { } initGroupResInfo(&pRuntimeEnv->groupResInfo, &pInfo->binfo.resultRowInfo); + if (!pRuntimeEnv->pQueryAttr->stableQuery) { + sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); + } toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) { @@ -5446,7 +5920,10 @@ static SSDataBlock* doFill(void* param, bool* newgroup) { } while(1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); SSDataBlock* pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + if (*newgroup) { assert(pBlock != NULL); } @@ -5606,8 +6083,8 @@ static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) { tfree(pInfo->prevData); } -static void destroyArithOperatorInfo(void* param, int32_t numOfOutput) { - SArithOperatorInfo* pInfo = (SArithOperatorInfo*) param; +static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { + SProjectOperatorInfo* pInfo = (SProjectOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); } @@ -5653,8 +6130,8 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO return pOperator; } -SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { - SArithOperatorInfo* pInfo = calloc(1, sizeof(SArithOperatorInfo)); +SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SProjectOperatorInfo* pInfo = calloc(1, sizeof(SProjectOperatorInfo)); pInfo->seed = rand(); pInfo->bufCapacity = pRuntimeEnv->resultInfo.capacity; @@ -5667,8 +6144,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); - pOperator->name = "ArithmeticOperator"; - pOperator->operatorType = OP_Arithmetic; + pOperator->name = "ProjectOperator"; + pOperator->operatorType = OP_Project; pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; pOperator->info = pInfo; @@ -5676,8 +6153,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI pOperator->numOfOutput = numOfOutput; pOperator->pRuntimeEnv = 
pRuntimeEnv; - pOperator->exec = doArithmeticOperation; - pOperator->cleanup = destroyArithOperatorInfo; + pOperator->exec = doProjectOperation; + pOperator->cleanup = destroyProjectOperatorInfo; appendUpstream(pOperator, upstream); return pOperator; @@ -5697,8 +6174,13 @@ SColumnInfo* extractColumnFilterInfo(SExprInfo* pExpr, int32_t numOfOutput, int3 pCols[i].colId = pExpr[i].base.resColId; pCols[i].flist.numOfFilters = pExpr[i].base.flist.numOfFilters; - pCols[i].flist.filterInfo = calloc(pCols[i].flist.numOfFilters, sizeof(SColumnFilterInfo)); - memcpy(pCols[i].flist.filterInfo, pExpr[i].base.flist.filterInfo, pCols[i].flist.numOfFilters * sizeof(SColumnFilterInfo)); + if (pCols[i].flist.numOfFilters != 0) { + pCols[i].flist.filterInfo = calloc(pCols[i].flist.numOfFilters, sizeof(SColumnFilterInfo)); + memcpy(pCols[i].flist.filterInfo, pExpr[i].base.flist.filterInfo, pCols[i].flist.numOfFilters * sizeof(SColumnFilterInfo)); + } else { + // avoid runtime error + pCols[i].flist.filterInfo = NULL; + } } assert(numOfFilter > 0); @@ -5794,8 +6276,8 @@ SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpe pOperator->cleanup = destroyStateWindowOperatorInfo; appendUpstream(pOperator, upstream); - return pOperator; - + return pOperator; + } SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SSWindowOperatorInfo* pInfo = calloc(1, sizeof(SSWindowOperatorInfo)); @@ -5805,7 +6287,7 @@ SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); pInfo->prevTs = INT64_MIN; - pInfo->reptScan = false; + pInfo->reptScan = false; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); pOperator->name = "SessionWindowAggOperator"; @@ -5851,7 +6333,14 @@ SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato SGroupbyOperatorInfo* pInfo = calloc(1, sizeof(SGroupbyOperatorInfo)); pInfo->colIndex = -1; // group by column index + pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset); + + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; + + pQueryAttr->resultRowSize = (pQueryAttr->resultRowSize * + (int32_t)(GET_ROW_PARAM_FOR_MULTIOUTPUT(pQueryAttr, pQueryAttr->topBotQuery, pQueryAttr->stableQuery))); + pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity); initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT); @@ -5993,8 +6482,8 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) { SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0); - while(pInfo->currentIndex < pInfo->totalTables && count < maxNumOfTables) { - int32_t i = pInfo->currentIndex++; + while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) { + int32_t i = pInfo->curPos++; STableQueryInfo *item = taosArrayGetP(pa, i); char *output = pColInfo->pData + count * rsize; @@ -6035,11 +6524,11 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) { pOperator->status = OP_EXEC_DONE; qDebug("QInfo:0x%"PRIx64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count); } else { // return only the tags|table name etc. 
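The operator loops in the hunks above now bracket each upstream `exec` call with publishOperatorProfEvent(..., QUERY_PROF_BEFORE/AFTER_OPERATOR_EXEC), and calculateOperatorProfResults replays those events with a stack to split wall-clock time into per-operator self time. The sketch below reproduces only that accounting idea with plain arrays instead of taosArray/taosHash; the operator types and timestamps are invented for the example.

```c
#include <stdint.h>
#include <stdio.h>

/* Replay of BEFORE/AFTER exec events, mirroring doOperatorExecProfOnce():
 * self time = (end - begin) - time already attributed to children, and each
 * finished frame charges its self time to every ancestor still on the stack. */
typedef struct { uint8_t opType; int64_t begin; int64_t childTime; } Frame;
typedef struct { uint8_t opType; int64_t eventTime; int isBefore; } Event;

enum { MAX_OPS = 16 };

int main(void) {
  /* One parent operator (type 1) running 0..100 us that pulls from a child
   * operator (type 2) running 10..60 us. Numbers are made up. */
  Event   events[] = { {1, 0, 1}, {2, 10, 1}, {2, 60, 0}, {1, 100, 0} };
  int     numEvents = 4;
  int64_t selfTime[MAX_OPS] = {0};

  Frame stack[MAX_OPS];
  int   top = 0;

  for (int i = 0; i < numEvents; ++i) {
    Event *e = &events[i];
    if (e->isBefore) {                        /* QUERY_PROF_BEFORE_OPERATOR_EXEC: push a frame */
      stack[top++] = (Frame){ e->opType, e->eventTime, 0 };
    } else {                                  /* QUERY_PROF_AFTER_OPERATOR_EXEC: pop and account */
      Frame   f    = stack[--top];
      int64_t self = (e->eventTime - f.begin) - f.childTime;
      for (int j = 0; j < top; ++j) {
        stack[j].childTime += self;           /* ancestors see this as descendant time */
      }
      selfTime[f.opType] += self;
    }
  }

  printf("operator type 1 self time: %lld us\n", (long long)selfTime[1]);  /* 50 */
  printf("operator type 2 self time: %lld us\n", (long long)selfTime[2]);  /* 50 */
  return 0;
}
```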
- SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo + SExprInfo* pExprInfo = &pOperator->pExpr[0]; // todo use the column list instead of exprinfo count = 0; - while(pInfo->currentIndex < pInfo->totalTables && count < maxNumOfTables) { - int32_t i = pInfo->currentIndex++; + while(pInfo->curPos < pInfo->totalTables && count < maxNumOfTables) { + int32_t i = pInfo->curPos++; STableQueryInfo* item = taosArrayGetP(pa, i); @@ -6068,7 +6557,7 @@ static SSDataBlock* doTagScan(void* param, bool* newgroup) { count += 1; } - if (pInfo->currentIndex >= pInfo->totalTables) { + if (pInfo->curPos >= pInfo->totalTables) { pOperator->status = OP_EXEC_DONE; } @@ -6087,7 +6576,7 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf assert(numOfGroup == 0 || numOfGroup == 1); pInfo->totalTables = pRuntimeEnv->tableqinfoGroupInfo.numOfTables; - pInfo->currentIndex = 0; + pInfo->curPos = 0; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); pOperator->name = "SeqTableTagScan"; @@ -6116,15 +6605,30 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { pRes->info.rows = 0; SSDataBlock* pBlock = NULL; while(1) { + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC); pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup); + publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC); + if (pBlock == NULL) { + setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); + pOperator->status = OP_EXEC_DONE; + break; + } + if (pInfo->colIndex == -1) { + for (int i = 0; i < taosArrayGetSize(pBlock->pDataBlock); i++) { + SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, i); + if (pColDataInfo->info.colId == pOperator->pExpr[0].base.resColId) { + pInfo->colIndex = i; + break; + } + } + } + if (pInfo->colIndex == -1) { setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED); pOperator->status = OP_EXEC_DONE; return NULL; } - - assert(pBlock->info.numOfCols == 1); - SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, 0); + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pInfo->colIndex); int16_t bytes = pColInfoData->info.bytes; int16_t type = pColInfoData->info.type; @@ -6148,9 +6652,18 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { continue; } - void* res = taosHashGet(pInfo->pSet, val, bytes); + size_t keyLen = 0; + if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) { + tstr* var = (tstr*)(val); + keyLen = varDataLen(var); + } else { + keyLen = bytes; + } + + int dummy; + void* res = taosHashGet(pInfo->pSet, val, keyLen); if (res == NULL) { - taosHashPut(pInfo->pSet, val, bytes, NULL, 0); + taosHashPut(pInfo->pSet, val, keyLen, &dummy, sizeof(dummy)); char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; memcpy(start, val, bytes); pRes->info.rows += 1; @@ -6167,7 +6680,8 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { SDistinctOperatorInfo* pInfo = calloc(1, sizeof(SDistinctOperatorInfo)); - + pInfo->colIndex = -1; + pInfo->threshold = 10000000; // distinct result threshold pInfo->outputCapacity = 4096; pInfo->pSet = taosHashInit(64, taosGetDefaultHashFunction(pExpr->base.colType), false, HASH_NO_LOCK); pInfo->pRes = createOutputBuf(pExpr, numOfOutput, (int32_t) pInfo->outputCapacity); @@ -6177,10 +6691,12 @@ SOperatorInfo* 
createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperat pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; pOperator->operatorType = OP_Distinct; + pOperator->pExpr = pExpr; pOperator->numOfOutput = numOfOutput; pOperator->info = pInfo; pOperator->pRuntimeEnv = pRuntimeEnv; pOperator->exec = hashDistinct; + pOperator->pExpr = pExpr; pOperator->cleanup = destroyDistinctOperatorInfo; appendUpstream(pOperator, upstream); @@ -6258,7 +6774,7 @@ static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) { return true; } -static UNUSED_FUNC bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput, +static bool validateQueryTableCols(SQueriedTableInfo* pTableInfo, SSqlExpr** pExpr, int32_t numOfOutput, SColumnInfo* pTagCols, void* pMsg) { int32_t numOfTotal = pTableInfo->numOfCols + pTableInfo->numOfTags; if (pTableInfo->numOfCols < 0 || pTableInfo->numOfTags < 0 || numOfTotal > TSDB_MAX_COLUMNS) { @@ -6386,6 +6902,9 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pQueryMsg->sw.primaryColId = htonl(pQueryMsg->sw.primaryColId); pQueryMsg->tableScanOperator = htonl(pQueryMsg->tableScanOperator); pQueryMsg->numOfOperator = htonl(pQueryMsg->numOfOperator); + pQueryMsg->udfContentOffset = htonl(pQueryMsg->udfContentOffset); + pQueryMsg->udfContentLen = htonl(pQueryMsg->udfContentLen); + pQueryMsg->udfNum = htonl(pQueryMsg->udfNum); // query msg safety check if (!validateQueryMsg(pQueryMsg)) { @@ -6443,6 +6962,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pExprMsg->resType = htons(pExprMsg->resType); pExprMsg->resBytes = htons(pExprMsg->resBytes); + pExprMsg->interBytes = htonl(pExprMsg->interBytes); pExprMsg->functionId = htons(pExprMsg->functionId); pExprMsg->numOfParams = htons(pExprMsg->numOfParams); @@ -6628,6 +7148,33 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pMsg += sizeof(int32_t); } + if (pQueryMsg->udfContentLen > 0) { + param->pUdfInfo = calloc(1, sizeof(SUdfInfo)); + param->pUdfInfo->contLen = pQueryMsg->udfContentLen; + + pMsg = (char*)pQueryMsg + pQueryMsg->udfContentOffset; + param->pUdfInfo->resType = *(int8_t*) pMsg; + pMsg += sizeof(int8_t); + + param->pUdfInfo->resBytes = htons(*(int16_t*)pMsg); + pMsg += sizeof(int16_t); + + tstr* name = (tstr*)(pMsg); + param->pUdfInfo->name = strndup(name->data, name->len); + + pMsg += varDataTLen(name); + param->pUdfInfo->funcType = htonl(*(int32_t*)pMsg); + pMsg += sizeof(int32_t); + + param->pUdfInfo->bufSize = htonl(*(int32_t*)pMsg); + pMsg += sizeof(int32_t); + + param->pUdfInfo->content = malloc(pQueryMsg->udfContentLen); + memcpy(param->pUdfInfo->content, pMsg, pQueryMsg->udfContentLen); + + pMsg += pQueryMsg->udfContentLen; + } + param->sql = strndup(pMsg, pQueryMsg->sqlstrLen); SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols}; @@ -6650,41 +7197,41 @@ _cleanup: return code; } - int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { - if (filterNum <= 0) { - return TSDB_CODE_SUCCESS; - } - - *dst = calloc(filterNum, sizeof(*src)); - if (*dst == NULL) { - return TSDB_CODE_QRY_OUT_OF_MEMORY; - } +int32_t cloneExprFilterInfo(SColumnFilterInfo **dst, SColumnFilterInfo* src, int32_t filterNum) { + if (filterNum <= 0) { + return TSDB_CODE_SUCCESS; + } - memcpy(*dst, src, sizeof(*src) * filterNum); + *dst = calloc(filterNum, sizeof(*src)); + if (*dst == NULL) { + return 
TSDB_CODE_QRY_OUT_OF_MEMORY; + } - for (int32_t i = 0; i < filterNum; i++) { - if ((*dst)[i].filterstr && dst[i]->len > 0) { - void *pz = calloc(1, (size_t)(*dst)[i].len + 1); + memcpy(*dst, src, sizeof(*src) * filterNum); - if (pz == NULL) { - if (i == 0) { - free(*dst); - } else { - freeColumnFilterInfo(*dst, i); - } + for (int32_t i = 0; i < filterNum; i++) { + if ((*dst)[i].filterstr && dst[i]->len > 0) { + void *pz = calloc(1, (size_t)(*dst)[i].len + 1); - return TSDB_CODE_QRY_OUT_OF_MEMORY; + if (pz == NULL) { + if (i == 0) { + free(*dst); + } else { + freeColumnFilterInfo(*dst, i); } - memcpy(pz, (void *)src->pz, (size_t)src->len + 1); - - (*dst)[i].pz = (int64_t)pz; + return TSDB_CODE_QRY_OUT_OF_MEMORY; } - } - return TSDB_CODE_SUCCESS; + memcpy(pz, (void *)src->pz, (size_t)src->len + 1); + + (*dst)[i].pz = (int64_t)pz; + } } + return TSDB_CODE_SUCCESS; +} + int32_t buildArithmeticExprFromMsg(SExprInfo *pExprInfo, void *pQueryMsg) { qDebug("qmsg:%p create arithmetic expr from binary", pQueryMsg); @@ -6718,7 +7265,7 @@ static int32_t updateOutputBufForTopBotQuery(SQueriedTableInfo* pTableInfo, SCol } else { SColumnInfo* pCol = &pTableInfo->colList[j]; int32_t ret = getResultDataInfo(pCol->type, pCol->bytes, functId, (int32_t)pExprs[i].base.param[0].i64, - &pExprs[i].base.resType, &pExprs[i].base.resBytes, &pExprs[i].base.interBytes, tagLen, superTable); + &pExprs[i].base.resType, &pExprs[i].base.resBytes, &pExprs[i].base.interBytes, tagLen, superTable, NULL); assert(ret == TSDB_CODE_SUCCESS); } } @@ -6727,12 +7274,138 @@ static int32_t updateOutputBufForTopBotQuery(SQueriedTableInfo* pTableInfo, SCol return TSDB_CODE_SUCCESS; } +void destroyUdfInfo(SUdfInfo* pUdfInfo) { + if (pUdfInfo == NULL) { + return; + } + + if (pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY]) { + if (pUdfInfo->isScript) { + (*(scriptDestroyFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY])(pUdfInfo->pScriptCtx); + tfree(pUdfInfo->content); + }else{ + (*(udfDestroyFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY])(&pUdfInfo->init); + } + } + + tfree(pUdfInfo->name); + + if (pUdfInfo->path) { + unlink(pUdfInfo->path); + } + + tfree(pUdfInfo->path); + tfree(pUdfInfo->content); + taosCloseDll(pUdfInfo->handle); + tfree(pUdfInfo); +} + +static char* getUdfFuncName(char* name, int type) { + char* funcname = calloc(1, TSDB_FUNCTIONS_NAME_MAX_LENGTH + 10); + + switch (type) { + case TSDB_UDF_FUNC_NORMAL: + strcpy(funcname, name); + break; + case TSDB_UDF_FUNC_INIT: + sprintf(funcname, "%s_init", name); + break; + case TSDB_UDF_FUNC_FINALIZE: + sprintf(funcname, "%s_finalize", name); + break; + case TSDB_UDF_FUNC_MERGE: + sprintf(funcname, "%s_merge", name); + break; + case TSDB_UDF_FUNC_DESTROY: + sprintf(funcname, "%s_destroy", name); + break; + default: + assert(0); + break; + } + + return funcname; +} + +int32_t initUdfInfo(SUdfInfo* pUdfInfo) { + if (pUdfInfo == NULL) { + return TSDB_CODE_SUCCESS; + } + //qError("script len: %d", pUdfInfo->contLen); + if (isValidScript(pUdfInfo->content, pUdfInfo->contLen)) { + pUdfInfo->isScript = 1; + pUdfInfo->pScriptCtx = createScriptCtx(pUdfInfo->content, pUdfInfo->resType, pUdfInfo->resBytes); + if (pUdfInfo->pScriptCtx == NULL) { + return TSDB_CODE_QRY_SYS_ERROR; + } + tfree(pUdfInfo->content); + + pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] = taosLoadScriptInit; + if (pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] == NULL + || (*(scriptInitFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_INIT])(pUdfInfo->pScriptCtx) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_QRY_SYS_ERROR; + } + + pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL] = 
taosLoadScriptNormal; + + if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { + pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE] = taosLoadScriptFinalize; + pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE] = taosLoadScriptMerge; + } + pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY] = taosLoadScriptDestroy; + + } else { + char path[PATH_MAX] = {0}; + taosGetTmpfilePath("script", path); + + FILE* file = fopen(path, "w+"); + + // TODO check for failure of flush to disk + /*size_t t = */ fwrite(pUdfInfo->content, pUdfInfo->contLen, 1, file); + fclose(file); + tfree(pUdfInfo->content); + + pUdfInfo->path = strdup(path); + + pUdfInfo->handle = taosLoadDll(path); + + if (NULL == pUdfInfo->handle) { + return TSDB_CODE_QRY_SYS_ERROR; + } + + pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_NORMAL)); + if (NULL == pUdfInfo->funcs[TSDB_UDF_FUNC_NORMAL]) { + return TSDB_CODE_QRY_SYS_ERROR; + } + + pUdfInfo->funcs[TSDB_UDF_FUNC_INIT] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_INIT)); + + if (pUdfInfo->funcType == TSDB_UDF_TYPE_AGGREGATE) { + pUdfInfo->funcs[TSDB_UDF_FUNC_FINALIZE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_FINALIZE)); + pUdfInfo->funcs[TSDB_UDF_FUNC_MERGE] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_MERGE)); + } + + pUdfInfo->funcs[TSDB_UDF_FUNC_DESTROY] = taosLoadSym(pUdfInfo->handle, getUdfFuncName(pUdfInfo->name, TSDB_UDF_FUNC_DESTROY)); + + if (pUdfInfo->funcs[TSDB_UDF_FUNC_INIT]) { + return (*(udfInitFunc)pUdfInfo->funcs[TSDB_UDF_FUNC_INIT])(&pUdfInfo->init); + } + } + + return TSDB_CODE_SUCCESS; +} + // TODO tag length should be passed from client, refactor int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExprInfo** pExprInfo, - SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg) { + SSqlExpr** pExprMsg, SColumnInfo* pTagCols, int32_t queryType, void* pMsg, SUdfInfo* pUdfInfo) { *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; + code = initUdfInfo(pUdfInfo); + if (code) { + return code; + } + SExprInfo *pExprs = (SExprInfo *)calloc(numOfOutput, sizeof(SExprInfo)); if (pExprs == NULL) { return TSDB_CODE_QRY_OUT_OF_MEMORY; @@ -6743,8 +7416,8 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp for (int32_t i = 0; i < numOfOutput; ++i) { pExprs[i].base = *pExprMsg[i]; - memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param)); + memset(pExprs[i].base.param, 0, sizeof(tVariant) * tListLen(pExprs[i].base.param)); for (int32_t j = 0; j < pExprMsg[i]->numOfParams; ++j) { tVariantAssign(&pExprs[i].base.param[j], &pExprMsg[i]->param[j]); } @@ -6819,8 +7492,9 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp return TSDB_CODE_QRY_INVALID_MSG; } + // todo remove it if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, - &pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { + &pExprs[i].base.interBytes, 0, isSuperTable, pUdfInfo) != TSDB_CODE_SUCCESS) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } @@ -6841,7 +7515,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp // todo refactor int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t numOfOutput, SExprInfo** pExprInfo, - SSqlExpr** pExpr, SExprInfo* prevExpr) { + SSqlExpr** pExpr, SExprInfo* prevExpr, SUdfInfo *pUdfInfo) 
{ *pExprInfo = NULL; int32_t code = TSDB_CODE_SUCCESS; @@ -6886,7 +7560,7 @@ int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg* pQueryMsg, int32_t nu int32_t param = (int32_t)pExprs[i].base.param[0].i64; if (getResultDataInfo(type, bytes, pExprs[i].base.functionId, param, &pExprs[i].base.resType, &pExprs[i].base.resBytes, - &pExprs[i].base.interBytes, 0, isSuperTable) != TSDB_CODE_SUCCESS) { + &pExprs[i].base.interBytes, 0, isSuperTable, pUdfInfo) != TSDB_CODE_SUCCESS) { tfree(pExprs); return TSDB_CODE_QRY_INVALID_MSG; } @@ -7000,6 +7674,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId) { doCreateFilterInfo(pQueryAttr->tableCols, pQueryAttr->numOfCols, pQueryAttr->numOfFilterCols, &pQueryAttr->pFilterInfo, qId); + pQueryAttr->createFilterOperator = true; + return TSDB_CODE_SUCCESS; } @@ -7070,7 +7746,7 @@ FORCE_INLINE bool checkQIdEqual(void *qHandle, uint64_t qId) { SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, SExprInfo* pExprs, SExprInfo* pSecExprs, STableGroupInfo* pTableGroupInfo, SColumnInfo* pTagCols, int32_t vgId, - char* sql, uint64_t *qId) { + char* sql, uint64_t qId, SUdfInfo* pUdfInfo) { int16_t numOfCols = pQueryMsg->numOfCols; int16_t numOfOutput = pQueryMsg->numOfOutput; @@ -7079,7 +7755,9 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S goto _cleanup_qinfo; } - pQInfo->qId = *qId; + pQInfo->qId = qId; + + pQInfo->runtimeEnv.pUdfInfo = pUdfInfo; // to make sure third party won't overwrite this structure pQInfo->signature = pQInfo; @@ -7104,6 +7782,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S pQueryAttr->prjInfo.vgroupLimit = pQueryMsg->vgroupLimit; pQueryAttr->prjInfo.ts = (pQueryMsg->order == TSDB_ORDER_ASC)? 
INT64_MIN:INT64_MAX; pQueryAttr->sw = pQueryMsg->sw; + pQueryAttr->vgId = vgId; pQueryAttr->stableQuery = pQueryMsg->stableQuery; pQueryAttr->topBotQuery = pQueryMsg->topBotQuery; @@ -7136,7 +7815,6 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S } } - // calculate the result row size for (int16_t col = 0; col < numOfOutput; ++col) { assert(pExprs[col].base.resBytes > 0); pQueryAttr->resultRowSize += pExprs[col].base.resBytes; @@ -7157,6 +7835,20 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S goto _cleanup; } + if (pSecExprs != NULL) { + int32_t resultRowSize = 0; + + // calculate the result row size + for (int16_t col = 0; col < pQueryAttr->numOfExpr2; ++col) { + assert(pSecExprs[col].base.resBytes > 0); + resultRowSize += pSecExprs[col].base.resBytes; + } + + if (resultRowSize > pQueryAttr->resultRowSize) { + pQueryAttr->resultRowSize = resultRowSize; + } + } + if (pQueryAttr->fillType != TSDB_FILL_NONE) { pQueryAttr->fillVal = malloc(sizeof(int64_t) * pQueryAttr->numOfOutput); if (pQueryAttr->fillVal == NULL) { @@ -7189,7 +7881,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S tsem_init(&pQInfo->ready, 0, 0); pQueryAttr->window = pQueryMsg->window; - changeExecuteScanOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery); + updateDataCheckOrder(pQInfo, pQueryMsg, pQueryAttr->stableQuery); SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; STimeWindow window = pQueryAttr->window; @@ -7285,11 +7977,16 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; STSBuf *pTsBuf = NULL; - if (pTsBufInfo->tsLen > 0) { // open new file to save the result - char *tsBlock = start + pTsBufInfo->tsOffset; + + if (pTsBufInfo->tsLen > 0) { // open new file to save the result + char* tsBlock = start + pTsBufInfo->tsOffset; pTsBuf = tsBufCreateFromCompBlocks(tsBlock, pTsBufInfo->tsNumOfBlocks, pTsBufInfo->tsLen, pTsBufInfo->tsOrder, - pQueryAttr->vgId); + pQueryAttr->vgId); + if (pTsBuf == NULL) { + code = TSDB_CODE_QRY_NO_DISKSPACE; + goto _error; + } tsBufResetPos(pTsBuf); bool ret = tsBufNextPos(pTsBuf); UNUSED(ret); @@ -7297,11 +7994,12 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, void* sourceOptr, SQInfo* SArray* prevResult = NULL; if (prevResultLen > 0) { - prevResult = interResFromBinary(param->prevResult, prevResultLen); - + prevResult = interResFromBinary(param->prevResult, prevResultLen); + pRuntimeEnv->prevResult = prevResult; } + pRuntimeEnv->currentOffset = pQueryAttr->limit.offset; if (tsdb != NULL) { pQueryAttr->precision = tsdbGetCfg(tsdb)->precision; } @@ -7433,6 +8131,9 @@ void freeQInfo(SQInfo *pQInfo) { tfree(pQInfo->pBuf); tfree(pQInfo->sql); + taosArrayDestroy(pQInfo->summary.queryProfEvents); + taosHashCleanup(pQInfo->summary.operatorProfResults); + taosArrayDestroy(pRuntimeEnv->groupResInfo.pRows); pQInfo->signature = 0; diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c index 7e8dd66ed12ee38d9dba5c982a5b78fda9a872ac..1c1ec21d653b5b96fe792aa05641191a441b8e8d 100644 --- a/src/query/src/qFilterfunc.c +++ b/src/query/src/qFilterfunc.c @@ -254,15 +254,23 @@ bool notNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* return true; } bool inOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type) { - if (type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_SMALLINT || type == 
TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_INT) { + if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t minv = -1, maxv = -1; GET_TYPED_DATA(minv, int64_t, type, minval); GET_TYPED_DATA(maxv, int64_t, type, maxval); if (minv == maxv) { return NULL != taosHashGet((SHashObj *)pFilter->q, (char *)&minv, sizeof(minv)); } - return true; - } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { + return false; + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + uint64_t minv = 0, maxv = 0; + GET_TYPED_DATA(minv, uint64_t, type, minval); + GET_TYPED_DATA(maxv, uint64_t, type, maxval); + if (minv == maxv) { + return NULL != taosHashGet((SHashObj *)pFilter->q, (char *)&minv, sizeof(minv)); + } + return false; + }else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { double v; GET_TYPED_DATA(v, double, type, minval); return NULL != taosHashGet((SHashObj *)pFilter->q, (char *)&v, sizeof(v)); diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index ee587a515dca39559bc6d061501d4e3397c0781a..a94d015e0679aba71732eee3e15f11be4e40de9a 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -104,7 +104,7 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo* int32_t num = (int32_t) taosArrayGetSize(pExprs); SQueryNode* pNode = createQueryNode(QNODE_TAGSCAN, "TableTagScan", NULL, 0, pExprs->pData, num, info, NULL); - if (pQueryInfo->distinctTag) { + if (pQueryInfo->distinct) { pNode = createQueryNode(QNODE_DISTINCT, "Distinct", &pNode, 1, pExprs->pData, num, info, NULL); } @@ -127,7 +127,8 @@ static SQueryNode* doAddTableColumnNode(SQueryInfo* pQueryInfo, STableMetaInfo* SColumn* pCol = taosArrayGetP(tableCols, i); SColumnIndex index = {.tableIndex = 0, .columnIndex = pCol->columnIndex}; - SExprInfo* p = tscExprCreate(pQueryInfo, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes, + STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo, index.tableIndex); + SExprInfo* p = tscExprCreate(pTableMetaInfo1, TSDB_FUNC_PRJ, &index, pCol->info.type, pCol->info.bytes, pCol->info.colId, 0, TSDB_COL_NORMAL); strncpy(p->base.aliasName, pSchema[pCol->columnIndex].name, tListLen(p->base.aliasName)); @@ -550,9 +551,11 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { int32_t op = 0; if (onlyQueryTags(pQueryAttr)) { // do nothing for tags query - op = OP_TagScan; - taosArrayPush(plan, &op); - if (pQueryAttr->distinctTag) { + if (onlyQueryTags(pQueryAttr)) { + op = OP_TagScan; + taosArrayPush(plan, &op); + } + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); } @@ -565,7 +568,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } @@ -585,7 +588,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else if (pQueryAttr->sw.gap > 0) { @@ -593,7 +596,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else if (pQueryAttr->stateWindow) { @@ -601,7 +604,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else if 
(pQueryAttr->simpleAgg) { @@ -619,18 +622,23 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } if (pQueryAttr->pExpr2 != NULL && !pQueryAttr->stableQuery) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else { // diff/add/multiply/subtract/division - if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->vgId == 0) { // todo refactor + if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->createFilterOperator && pQueryAttr->vgId == 0) { // todo refactor op = OP_Filter; taosArrayPush(plan, &op); } else { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); + if (pQueryAttr->distinct) { + op = OP_Distinct; + taosArrayPush(plan, &op); + } } } + if (pQueryAttr->limit.limit > 0 || pQueryAttr->limit.offset > 0) { op = OP_Limit; @@ -650,7 +658,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { int32_t op = OP_MultiwayMergeSort; taosArrayPush(plan, &op); - if (pQueryAttr->distinctTag) { + if (pQueryAttr->distinct) { op = OP_Distinct; taosArrayPush(plan, &op); } @@ -665,7 +673,7 @@ } if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } diff --git a/src/query/src/qScript.c b/src/query/src/qScript.c new file mode 100644 index 0000000000000000000000000000000000000000..261164a84c0b347adc36e3e2abaf2113d5564436 --- /dev/null +++ b/src/query/src/qScript.c @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "os.h" +#include "qScript.h" +#include "ttype.h" +#include "tstrbuild.h" +#include "queryLog.h" +#include "ttokendef.h" + +static ScriptEnvPool *pool = NULL; + +static ScriptEnv* getScriptEnvFromPool(); +static void addScriptEnvToPool(ScriptEnv *pEnv); + +static lua_State* createLuaEnv(); +static void destroyLuaEnv(lua_State *state); + +static void destroyScriptEnv(ScriptEnv *pEnv); + +static void luaValueToTaosType(lua_State *lua, char *interBuf, int32_t *numOfOutput, int16_t oType, int16_t oBytes); +static void taosValueToLuaType(lua_State *lua, int32_t type, char *val); + +static bool hasBaseFuncDefinedInScript(lua_State *lua, const char *funcPrefix, int32_t len); + +static int userlib_exampleFunc(lua_State *lua) { + double op1 = luaL_checknumber(lua,1); + double op2 = luaL_checknumber(lua,2); + lua_pushnumber(lua, op1 * op2); + return 1; +} +void luaRegisterLibFunc(lua_State *lua) { + lua_register(lua, "exampleFunc", userlib_exampleFunc); +} + +void luaLoadLib(lua_State *lua, const char *libname, lua_CFunction luafunc) { + lua_pushcfunction(lua, luafunc); + lua_pushstring(lua, libname); + lua_call(lua, 1, 0); +} + +LUALIB_API int (luaopen_cjson) (lua_State *L); +LUALIB_API int (luaopen_struct) (lua_State *L); +LUALIB_API int (luaopen_cmsgpack) (lua_State *L); +LUALIB_API int (luaopen_bit) (lua_State *L); + + +static void luaLoadLibraries(lua_State *lua) { + luaLoadLib(lua, "", luaopen_base); + luaLoadLib(lua, LUA_TABLIBNAME, luaopen_table); + luaLoadLib(lua, LUA_STRLIBNAME, luaopen_string); + luaLoadLib(lua, LUA_MATHLIBNAME, luaopen_math); + luaLoadLib(lua, LUA_DBLIBNAME, luaopen_debug); +} +static void luaRemoveUnsupportedFunctions(lua_State *lua) { + lua_pushnil(lua); + lua_setglobal(lua,"loadfile"); + lua_pushnil(lua); + lua_setglobal(lua,"dofile"); +} +void taosValueToLuaType(lua_State *lua, int32_t type, char *val) { + //TODO(dengyihao): handle more data type + if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) { + int64_t v; + GET_TYPED_DATA(v, int64_t, type, val); + lua_pushnumber(lua, (lua_Number)v); + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + uint64_t v; + GET_TYPED_DATA(v, uint64_t, type, val); + lua_pushnumber(lua,(lua_Number)v); + } else if (IS_FLOAT_TYPE(type)) { + double v; + GET_TYPED_DATA(v, double, type, val); + lua_pushnumber(lua,v); + } else if (type == TSDB_DATA_TYPE_BINARY) { + lua_pushlstring(lua, val, varDataLen(val)); + } else if (type == TSDB_DATA_TYPE_NCHAR) { + } +} +int taosLoadScriptInit(void* pInit) { + ScriptCtx *pCtx = pInit; + char funcName[MAX_FUNC_NAME] = {0}; + sprintf(funcName, "%s_init", pCtx->funcName); + + lua_State* lua = pCtx->pEnv->lua_state; + lua_getglobal(lua, funcName); + if (lua_pcall(lua, 0, -1, 0)) { + lua_pop(lua, -1); + } + lua_setglobal(lua, "global"); + return 0; +} +void taosLoadScriptNormal(void *pInit, char *pInput, int16_t iType, int16_t iBytes, int32_t numOfRows, + int64_t *ptsList, int64_t key, char* pOutput, char *ptsOutput, int32_t *numOfOutput, int16_t oType, int16_t oBytes) { + ScriptCtx* pCtx = pInit; + char funcName[MAX_FUNC_NAME] = {0}; + sprintf(funcName, "%s_add", pCtx->funcName); + + lua_State* lua = pCtx->pEnv->lua_state; + lua_getglobal(lua, funcName); + + // first param of script; + lua_newtable(lua); + int32_t offset = 0; + for (int32_t i = 0; i < numOfRows; i++) { + taosValueToLuaType(lua, iType, pInput + offset); + lua_rawseti(lua, -2, i+1); + offset += iBytes; + } + int isGlobalState = false; + lua_getglobal(lua, "global"); + if (lua_istable(lua, -1)) { + isGlobalState = 
true; + } + lua_pushnumber(lua, (lua_Number)key); + // do call lua script + if (lua_pcall(lua, 3, 1, 0) != 0) { + qError("SCRIPT ERROR: %s", lua_tostring(lua, -1)); + lua_pop(lua, -1); + return; + } + int tNumOfOutput = 0; + if (isGlobalState == false) { + luaValueToTaosType(lua, pOutput, &tNumOfOutput, oType, oBytes); + } else { + lua_setglobal(lua, "global"); + } + *numOfOutput = tNumOfOutput; +} + +void taosLoadScriptMerge(void *pInit, char* data, int32_t numOfRows, char* pOutput, int32_t* numOfOutput) { + ScriptCtx *pCtx = pInit; + char funcName[MAX_FUNC_NAME] = {0}; + sprintf(funcName, "%s_merge", pCtx->funcName); + + lua_State* lua = pCtx->pEnv->lua_state; + lua_getglobal(lua, funcName); + if (!lua_isfunction(lua, -1)) { + qError("SCRIPT ERROR: %s", lua_tostring(lua, -1)); + return; + } + + lua_getglobal(lua, "global"); + if (lua_pcall(lua, 1, 1, 0) != 0) { + qError("SCRIPT ERROR: %s", lua_tostring(lua, -1)); + lua_pop(lua, -1); + return; + } + int tNumOfOutput = 0; + luaValueToTaosType(lua, pOutput, &tNumOfOutput, pCtx->resType, pCtx->resBytes); + *numOfOutput = tNumOfOutput; +} + +//do not support agg now +void taosLoadScriptFinalize(void *pInit,int64_t key, char *pOutput, int32_t* numOfOutput) { + ScriptCtx *pCtx = pInit; + char funcName[MAX_FUNC_NAME] = {0}; + sprintf(funcName, "%s_finalize", pCtx->funcName); + + lua_State* lua = pCtx->pEnv->lua_state; + lua_getglobal(lua, funcName); + if (!lua_isfunction(lua, -1)) { + qError("SCRIPT ERROR: %s", lua_tostring(lua, -1)); + return; + } + + lua_getglobal(lua, "global"); + + lua_pushnumber(lua, (lua_Number)key); + if (lua_pcall(lua, 2, 2, 0) != 0) { + qError("SCRIPT ERROR: %s", lua_tostring(lua, -1)); + lua_pop(lua, -1); + return; + } + lua_setglobal(lua, "global"); + int tNumOfOutput = 0; + luaValueToTaosType(lua, pOutput, &tNumOfOutput, pCtx->resType, pCtx->resBytes); + *numOfOutput = tNumOfOutput; +} + +void taosLoadScriptDestroy(void *pInit) { + destroyScriptCtx(pInit); +} + +ScriptCtx* createScriptCtx(char *script, int8_t resType, int16_t resBytes) { + ScriptCtx *pCtx = (ScriptCtx *)calloc(1, sizeof(ScriptCtx)); + pCtx->state = SCRIPT_STATE_INIT; + pCtx->pEnv = getScriptEnvFromPool(); // + pCtx->resType = resType; + pCtx->resBytes = resBytes; + + if (pCtx->pEnv == NULL) { + destroyScriptCtx(pCtx); + return NULL; + } + + lua_State *lua = pCtx->pEnv->lua_state; + if (luaL_dostring(lua, script)) { + lua_pop(lua, 1); + qError("dynamic load script failed"); + destroyScriptCtx(pCtx); + return NULL; + } + lua_getglobal(lua, USER_FUNC_NAME); + const char *name = lua_tostring(lua, -1); + if (name == NULL) { + lua_pop(lua, 1); + qError("SCRIPT ERROR: invalid script"); + destroyScriptCtx(pCtx); + return NULL; + } + memcpy(pCtx->funcName, name, strlen(name)); + lua_pop(lua, 1); + + return pCtx; +} +void destroyScriptCtx(void *pCtx) { + if (pCtx == NULL) return; + addScriptEnvToPool(((ScriptCtx *)pCtx)->pEnv); + free(pCtx); +} + +void luaValueToTaosType(lua_State *lua, char *interBuf, int32_t *numOfOutput, int16_t oType, int16_t oBytes) { + int t = lua_type(lua,-1); + int32_t sz = 0; + switch (t) { + case LUA_TSTRING: + //TODO(yihaodeng): handle str type + { + const char *v = lua_tostring(lua, -1); + memcpy(interBuf, v, strlen(v)); + sz = 1; + } + break; + case LUA_TBOOLEAN: + { + double v = lua_tonumber(lua, -1); + memcpy(interBuf, (char *)&v, oBytes); + sz = 1; + } + break; + case LUA_TNUMBER: + { + if (oType == TSDB_DATA_TYPE_FLOAT) { + float v = (float)lua_tonumber(lua, -1); + memcpy(interBuf, (char *)&v, oBytes); + sz = 1; + } else if 
(oType == TSDB_DATA_TYPE_DOUBLE) { + double v = (double)lua_tonumber(lua, -1); + memcpy(interBuf, (char *)&v, oBytes); + sz = 1; + } else if (oType == TSDB_DATA_TYPE_BIGINT) { + int64_t v = (int64_t)lua_tonumber(lua, -1); + memcpy(interBuf, (char *)&v, oBytes); + sz = 1; + } else if (oType <= TSDB_DATA_TYPE_INT) { + int32_t v = (int32_t)lua_tonumber(lua, -1); + memcpy(interBuf, (char *)&v, oBytes); + sz = 1; + } + } + break; + case LUA_TTABLE: + { + lua_pushnil(lua); + int32_t offset = 0; + while(lua_next(lua, -2)) { + int32_t v = (int32_t)lua_tonumber(lua, -1); + memcpy(interBuf + offset, (char *)&v, oBytes); + offset += oBytes; + lua_pop(lua, 1); + sz += 1; + } + } + break; + default: + setNull(interBuf, oType, oBytes); + sz = 1; + break; + } + lua_pop(lua,1); // pop ret value from script + *numOfOutput = sz; +} + + +/* +*Initialize the scripting environment. +*/ +lua_State* createLuaEnv() { + lua_State *lua = lua_open(); + luaLoadLibraries(lua); + luaRemoveUnsupportedFunctions(lua); + + // register func in external lib + luaRegisterLibFunc(lua); + + { + char *errh_func = "local dbg = debug\n" + "function __taos__err__handler(err)\n" + " local i = dbg.getinfo(2,'nSl')\n" + " if i and i.what == 'C' then\n" + " i = dbg.getinfo(3,'nSl')\n" + " end\n" + " if i then\n" + " return i.source .. ':' .. i.currentline .. ': ' .. err\n" + " else\n" + " return err\n" + " end\n" + "end\n"; + luaL_loadbuffer(lua,errh_func,strlen(errh_func),"@err_handler_def"); + lua_pcall(lua,0,0,0); + } + + return lua; +} + +void destroyLuaEnv(lua_State *lua) { + lua_close(lua); +} + +int32_t scriptEnvPoolInit() { + const int size = 10; // configure or not + pool = malloc(sizeof(ScriptEnvPool)); + pthread_mutex_init(&pool->mutex, NULL); + + pool->scriptEnvs = tdListNew(sizeof(ScriptEnv *)); + for (int i = 0; i < size; i++) { + ScriptEnv *env = malloc(sizeof(ScriptEnv)); + env->funcId = taosHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);; + env->lua_state = createLuaEnv(); + tdListAppend(pool->scriptEnvs, (void *)(&env)); + } + pool->mSize = size; + pool->cSize = size; + return 0; +} + +void scriptEnvPoolCleanup() { + if (pool == NULL) return; + + SListNode *pNode = NULL; + while ((pNode = tdListPopHead(pool->scriptEnvs)) != NULL) { + ScriptEnv *pEnv = NULL; + tdListNodeGetData(pool->scriptEnvs, pNode, (void *)(&pEnv)); + destroyScriptEnv(pEnv); + listNodeFree(pNode); + } + tdListFree(pool->scriptEnvs); + pthread_mutex_destroy(&pool->mutex); + free(pool); +} + +void destroyScriptEnv(ScriptEnv *pEnv) { + destroyLuaEnv(pEnv->lua_state); + taosHashCleanup(pEnv->funcId); + free(pEnv); +} + +ScriptEnv* getScriptEnvFromPool() { + ScriptEnv *pEnv = NULL; + + pthread_mutex_lock(&pool->mutex); + if (pool->cSize <= 0) { + pthread_mutex_unlock(&pool->mutex); + return NULL; + } + SListNode *pNode = tdListPopHead(pool->scriptEnvs); + tdListNodeGetData(pool->scriptEnvs, pNode, (void *)(&pEnv)); + listNodeFree(pNode); + + pool->cSize--; + pthread_mutex_unlock(&pool->mutex); + return pEnv; +} + +void addScriptEnvToPool(ScriptEnv *pEnv) { + if (pEnv == NULL) { + return; + } + pthread_mutex_lock(&pool->mutex); + lua_settop(pEnv->lua_state, 0); + tdListAppend(pool->scriptEnvs, (void *)(&pEnv)); + pool->cSize++; + pthread_mutex_unlock(&pool->mutex); +} + +bool hasBaseFuncDefinedInScript(lua_State *lua, const char *funcPrefix, int32_t len) { + bool ret = true; + char funcName[MAX_FUNC_NAME]; + memcpy(funcName, funcPrefix, len); + + const char *base[] = {"_init", "_add"}; + for (int i = 0; (i < 
sizeof(base)/sizeof(base[0])) && (ret == true); i++) { + memcpy(funcName + len, base[i], strlen(base[i])); + memset(funcName + len + strlen(base[i]), 0, MAX_FUNC_NAME - len - strlen(base[i])); + lua_getglobal(lua, funcName); + ret = lua_isfunction(lua, -1); // exsit function or not + lua_pop(lua, 1); + } + return ret; +} + +bool isValidScript(char *script, int32_t len) { + ScriptEnv *pEnv = getScriptEnvFromPool(); // + if (pEnv == NULL) { + return false; + } + lua_State *lua = pEnv->lua_state; + if (len < strlen(script)) { + script[len] = 0; + } + if (luaL_dostring(lua, script)) { + lua_pop(lua, 1); + addScriptEnvToPool(pEnv); + qError("error at %s and %d", script, (int)(strlen(script))); + return false; + } + lua_getglobal(lua, USER_FUNC_NAME); + const char *name = lua_tostring(lua, -1); + if (name == NULL || strlen(name) >= USER_FUNC_NAME_LIMIT) { + lua_pop(lua, 1); + addScriptEnvToPool(pEnv); + qError("error at %s name: %s, len = %d", script, name, (int)(strlen(name))); + return false; + } + bool ret = hasBaseFuncDefinedInScript(lua, name, (int32_t)strlen(name)); + lua_pop(lua, 1); // pop + addScriptEnvToPool(pEnv); + return ret; +} + diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c index a2cb7aee00c8bd8173557a5bf19e8e337290c25c..eb920b3e173b0ca7d91ff01664cbc2d6ae28fa01 100644 --- a/src/query/src/qSqlParser.c +++ b/src/query/src/qSqlParser.c @@ -28,6 +28,7 @@ SSqlInfo qSqlParse(const char *pStr) { SSqlInfo sqlInfo = {0}; sqlInfo.valid = true; + sqlInfo.funcs = taosArrayInit(4, sizeof(SStrToken)); int32_t i = 0; while (1) { @@ -120,11 +121,24 @@ void tSqlExprListDestroy(SArray *pList) { taosArrayDestroyEx(pList, freeExprElem); } + +SArray *tStrTokenAppend(SArray *pList, SStrToken *pToken) { + if (pList == NULL) { + pList = taosArrayInit(4, sizeof(tVariantListItem)); + } + + if (pToken) { + taosArrayPush(pList, pToken); + } + + return pList; +} + tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) { tSqlExpr *pSqlExpr = calloc(1, sizeof(tSqlExpr)); if (pToken != NULL) { - pSqlExpr->token = *pToken; + pSqlExpr->exprToken = *pToken; } if (optrType == TK_NULL) { @@ -146,7 +160,8 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) { pSqlExpr->type = SQL_NODE_VALUE; pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP; } else if (optrType == TK_VARIABLE) { - // use nanosecond by default TODO set value after getting database precision + // use nanosecond by default + // TODO set value after getting database precision int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO); if (ret != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; @@ -161,7 +176,7 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) { // Here it must be the column name (tk_id) if it is not a number or string. 
assert(optrType == TK_ID || optrType == TK_ALL); if (pToken != NULL) { - pSqlExpr->colInfo = *pToken; + pSqlExpr->columnName = *pToken; } pSqlExpr->tokenId = optrType; @@ -180,17 +195,17 @@ tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToke return NULL; } - tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr)); - pExpr->tokenId = optType; - pExpr->type = SQL_NODE_SQLFUNCTION; - pExpr->pParam = pParam; + tSqlExpr *pExpr = calloc(1, sizeof(tSqlExpr)); + pExpr->tokenId = optType; + pExpr->type = SQL_NODE_SQLFUNCTION; + pExpr->Expr.paramList = pParam; int32_t len = (int32_t)((endToken->z + endToken->n) - pFuncToken->z); - pExpr->operand = (*pFuncToken); + pExpr->Expr.operand = (*pFuncToken); - pExpr->token.n = len; - pExpr->token.z = pFuncToken->z; - pExpr->token.type = pFuncToken->type; + pExpr->exprToken.n = len; + pExpr->exprToken.z = pFuncToken->z; + pExpr->exprToken.type = pFuncToken->type; return pExpr; } @@ -204,16 +219,16 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { pExpr->type = SQL_NODE_EXPR; if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) { - char* endPos = pRight->token.z + pRight->token.n; - pExpr->token.z = pLeft->token.z; - pExpr->token.n = (uint32_t)(endPos - pExpr->token.z); - pExpr->token.type = pLeft->token.type; + char* endPos = pRight->exprToken.z + pRight->exprToken.n; + pExpr->exprToken.z = pLeft->exprToken.z; + pExpr->exprToken.n = (uint32_t)(endPos - pExpr->exprToken.z); + pExpr->exprToken.type = pLeft->exprToken.type; } if ((pLeft != NULL && pRight != NULL) && (optrType == TK_PLUS || optrType == TK_MINUS || optrType == TK_STAR || optrType == TK_DIVIDE || optrType == TK_REM)) { /* - * if a token is noted as the TK_TIMESTAMP, the time precision is microsecond + * if a exprToken is noted as the TK_TIMESTAMP, the time precision is microsecond * Otherwise, the time precision is adaptive, determined by the time precision from databases. */ if ((pLeft->tokenId == TK_INTEGER && pRight->tokenId == TK_INTEGER) || @@ -304,7 +319,7 @@ tSqlExpr *tSqlExprCreate(tSqlExpr *pLeft, tSqlExpr *pRight, int32_t optrType) { tSqlExpr *pRSub = calloc(1, sizeof(tSqlExpr)); pRSub->tokenId = TK_SET; // TODO refactor ..... 
- pRSub->pParam = (SArray *)pRight; + pRSub->Expr.paramList = (SArray *)pRight; pExpr->pRight = pRSub; } else { @@ -346,8 +361,8 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { || (left->pLeft == NULL && right->pLeft) || (left->pRight && right->pRight == NULL) || (left->pRight == NULL && right->pRight) - || (left->pParam && right->pParam == NULL) - || (left->pParam == NULL && right->pParam)) { + || (left->Expr.paramList && right->Expr.paramList == NULL) + || (left->Expr.paramList == NULL && right->Expr.paramList)) { return 1; } @@ -355,20 +370,20 @@ int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right) { return 1; } - if (tStrTokenCompare(&left->colInfo, &right->colInfo)) { + if (tStrTokenCompare(&left->columnName, &right->columnName)) { return 1; } - if (right->pParam && left->pParam) { - size_t size = taosArrayGetSize(right->pParam); - if (left->pParam && taosArrayGetSize(left->pParam) != size) { + if (right->Expr.paramList && left->Expr.paramList) { + size_t size = taosArrayGetSize(right->Expr.paramList); + if (left->Expr.paramList && taosArrayGetSize(left->Expr.paramList) != size) { return 1; } for (int32_t i = 0; i < size; i++) { - tSqlExprItem* pLeftElem = taosArrayGet(left->pParam, i); + tSqlExprItem* pLeftElem = taosArrayGet(left->Expr.paramList, i); tSqlExpr* pSubLeft = pLeftElem->pNode; - tSqlExprItem* pRightElem = taosArrayGet(right->pParam, i); + tSqlExprItem* pRightElem = taosArrayGet(right->Expr.paramList, i); tSqlExpr* pSubRight = pRightElem->pNode; if (tSqlExprCompare(pSubLeft, pSubRight)) { @@ -401,8 +416,8 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { pExpr->pRight = tSqlExprClone(pSrc->pRight); } - //we don't clone pParam now because clone is only used for between/and - assert(pSrc->pParam == NULL); + //we don't clone paramList now because clone is only used for between/and + assert(pSrc->Expr.paramList == NULL); return pExpr; } @@ -460,7 +475,7 @@ static void doDestroySqlExprNode(tSqlExpr *pExpr) { tVariantDestroy(&pExpr->value); } - tSqlExprListDestroy(pExpr->pParam); + tSqlExprListDestroy(pExpr->Expr.paramList); free(pExpr); } @@ -697,9 +712,8 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) { } else { int32_t bytes = -(int32_t)(type->type); if (bytes > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) { - // we have to postpone reporting the error because it cannot be done here - // as pField->bytes is int16_t, use 'TSDB_MAX_NCHAR_LEN + 1' to avoid overflow - bytes = TSDB_MAX_NCHAR_LEN + 1; + // overflowed. set bytes to -1 so that error can be reported + bytes = -1; } else { bytes = bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE; } @@ -712,8 +726,8 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) { } else { int32_t bytes = -(int32_t)(type->type); if (bytes > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) { - // refer comment for NCHAR above - bytes = TSDB_MAX_BINARY_LEN + 1; + // overflowed. 
set bytes to -1 so that error can be reported + bytes = -1; } else { bytes += VARSTR_HEADER_SIZE; } @@ -922,8 +936,8 @@ void* destroyCreateTableSql(SCreateTableSql* pCreate) { } void SqlInfoDestroy(SSqlInfo *pInfo) { - if (pInfo == NULL) return; - + if (pInfo == NULL) return;; + taosArrayDestroy(pInfo->funcs); if (pInfo->type == TSDB_SQL_SELECT) { destroyAllSqlNode(pInfo->list); } else if (pInfo->type == TSDB_SQL_CREATE_TABLE) { @@ -938,7 +952,7 @@ void SqlInfoDestroy(SSqlInfo *pInfo) { taosArrayDestroy(pInfo->pMiscInfo->a); } - if (pInfo->pMiscInfo != NULL && pInfo->type == TSDB_SQL_CREATE_DB) { + if (pInfo->pMiscInfo != NULL && (pInfo->type == TSDB_SQL_CREATE_DB || pInfo->type == TSDB_SQL_ALTER_DB)) { taosArrayDestroyEx(pInfo->pMiscInfo->dbOpt.keep, freeVariant); } @@ -1018,6 +1032,18 @@ void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrTo pInfo->pMiscInfo->tableType = tableType; } +void setDropFuncInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken) { + pInfo->type = type; + + if (pInfo->pMiscInfo == NULL) { + pInfo->pMiscInfo = (SMiscInfo *)calloc(1, sizeof(SMiscInfo)); + pInfo->pMiscInfo->a = taosArrayInit(4, sizeof(SStrToken)); + } + + taosArrayPush(pInfo->pMiscInfo->a, pToken); +} + + void setShowOptions(SSqlInfo *pInfo, int32_t type, SStrToken* prefix, SStrToken* pPatterns) { if (pInfo->pMiscInfo == NULL) { pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo)); @@ -1052,6 +1078,24 @@ void setCreateDbInfo(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDb pInfo->pMiscInfo->dbOpt.ignoreExists = pIgExists->n; // sql.y has: ifnotexists(X) ::= IF NOT EXISTS. {X.n = 1;} } +void setCreateFuncInfo(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPath, TAOS_FIELD *output, SStrToken* bufSize, int32_t funcType) { + pInfo->type = type; + if (pInfo->pMiscInfo == NULL) { + pInfo->pMiscInfo = calloc(1, sizeof(SMiscInfo)); + } + + pInfo->pMiscInfo->funcOpt.name = *pName; + pInfo->pMiscInfo->funcOpt.path = *pPath; + pInfo->pMiscInfo->funcOpt.output = *output; + pInfo->pMiscInfo->funcOpt.type = funcType; + if (bufSize->n > 0) { + pInfo->pMiscInfo->funcOpt.bufSize = strtol(bufSize->z, NULL, 10); + } else { + pInfo->pMiscInfo->funcOpt.bufSize = 0; + } +} + + void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctInfo *pAcctInfo) { pInfo->type = type; if (pInfo->pMiscInfo == NULL) { diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index cf67e37cf2e47c0d305c8f04766d51f71989daf5..825b7960defc2547dfd7248bdc15d157e5808b24 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -2,6 +2,7 @@ #include "taoserror.h" #include "tscompression.h" #include "tutil.h" +#include "queryLog.h" static int32_t getDataStartOffset(); static void TSBufUpdateGroupInfo(STSBuf* pTSBuf, int32_t index, STSGroupBlockInfo* pBlockInfo); @@ -266,6 +267,10 @@ static void writeDataToDisk(STSBuf* pTSBuf) { if (pBlock->tag.nType == TSDB_DATA_TYPE_BINARY || pBlock->tag.nType == TSDB_DATA_TYPE_NCHAR) { metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); metaLen += (int32_t)fwrite(pBlock->tag.pz, 1, (size_t)pBlock->tag.nLen, pTSBuf->f); + } else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) { + metaLen += (int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); + float tfloat = (float)pBlock->tag.dKey; + metaLen += (int32_t)fwrite(&tfloat, 1, (size_t) pBlock->tag.nLen, pTSBuf->f); } else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) { metaLen += 
(int32_t)fwrite(&pBlock->tag.nLen, 1, sizeof(pBlock->tag.nLen), pTSBuf->f); metaLen += (int32_t)fwrite(&pBlock->tag.i64, 1, (size_t) pBlock->tag.nLen, pTSBuf->f); @@ -350,6 +355,11 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) { sz = fread(pBlock->tag.pz, (size_t)pBlock->tag.nLen, 1, pTSBuf->f); UNUSED(sz); + } else if (pBlock->tag.nType == TSDB_DATA_TYPE_FLOAT) { + float tfloat = 0; + sz = fread(&tfloat, (size_t) pBlock->tag.nLen, 1, pTSBuf->f); + pBlock->tag.dKey = (double)tfloat; + UNUSED(sz); } else if (pBlock->tag.nType != TSDB_DATA_TYPE_NULL) { //TODO check the return value sz = fread(&pBlock->tag.i64, (size_t) pBlock->tag.nLen, 1, pTSBuf->f); UNUSED(sz); @@ -633,10 +643,15 @@ int32_t STSBufUpdateHeader(STSBuf* pTSBuf, STSBufFileHeader* pHeader) { int32_t r = fseek(pTSBuf->f, 0, SEEK_SET); if (r != 0) { + qError("fseek failed, errno:%d", errno); + return -1; + } + + size_t ws = fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f); + if (ws != 1) { + qError("ts update header fwrite failed, size:%d, expected size:%d", (int32_t)ws, (int32_t)sizeof(STSBufFileHeader)); return -1; } - - fwrite(pHeader, sizeof(STSBufFileHeader), 1, pTSBuf->f); return 0; } @@ -853,9 +868,17 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ TSBufUpdateGroupInfo(pTSBuf, pTSBuf->numOfGroups - 1, pBlockInfo); int32_t ret = fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET); - UNUSED(ret); + if (ret == -1) { + qError("fseek failed, errno:%d", errno); + tsBufDestroy(pTSBuf); + return NULL; + } size_t sz = fwrite((void*)pData, 1, len, pTSBuf->f); - UNUSED(sz); + if (sz != len) { + qError("ts data fwrite failed, write size:%d, expected size:%d", (int32_t)sz, len); + tsBufDestroy(pTSBuf); + return NULL; + } pTSBuf->fileSize += len; pTSBuf->tsOrder = order; @@ -863,9 +886,16 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ STSBufFileHeader header = { .magic = TS_COMP_FILE_MAGIC, .numOfGroup = pTSBuf->numOfGroups, .tsOrder = pTSBuf->tsOrder}; - STSBufUpdateHeader(pTSBuf, &header); + if (STSBufUpdateHeader(pTSBuf, &header) < 0) { + tsBufDestroy(pTSBuf); + return NULL; + } - taosFsync(fileno(pTSBuf->f)); + if (taosFsync(fileno(pTSBuf->f)) == -1) { + qError("fsync failed, errno:%d", errno); + tsBufDestroy(pTSBuf); + return NULL; + } return pTSBuf; } diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 7b08450d3b9461c5ccfc316485f12c5d4ea84111..04a7079128ac035542611f06559409a81bc43cf1 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -44,8 +44,7 @@ int32_t getOutputInterResultBufSize(SQueryAttr* pQueryAttr) { int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t type) { pResultRowInfo->type = type; pResultRowInfo->size = 0; - pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; - pResultRowInfo->curIndex = -1; + pResultRowInfo->curPos = -1; pResultRowInfo->capacity = size; pResultRowInfo->pResult = calloc(pResultRowInfo->capacity, POINTER_BYTES); @@ -90,10 +89,9 @@ void resetResultRowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRo SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, &groupIndex, sizeof(groupIndex), uid); taosHashRemove(pRuntimeEnv->pResultRowHashTable, (const char *)pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(groupIndex))); } - - pResultRowInfo->curIndex = -1; - pResultRowInfo->size = 0; - pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; + + pResultRowInfo->size = 0; + pResultRowInfo->curPos = -1; } int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) { 
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 38ef81e7938a3635273c0cfa2cb4e86ca2e35c1e..0d140d5ffbca980f72576a39283b4765ed14da68 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -68,7 +68,7 @@ void freeParam(SQueryParam *param) { tfree(param->prevResult); } -int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qinfo_t* pQInfo, uint64_t *qId) { +int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qinfo_t* pQInfo, uint64_t qId) { assert(pQueryMsg != NULL && tsdb != NULL); int32_t code = TSDB_CODE_SUCCESS; @@ -93,12 +93,12 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi SQueriedTableInfo info = { .numOfTags = pQueryMsg->numOfTags, .numOfCols = pQueryMsg->numOfCols, .colList = pQueryMsg->tableCols}; if ((code = createQueryFunc(&info, pQueryMsg->numOfOutput, &param.pExprs, param.pExpr, param.pTagColumnInfo, - pQueryMsg->queryType, pQueryMsg)) != TSDB_CODE_SUCCESS) { + pQueryMsg->queryType, pQueryMsg, param.pUdfInfo)) != TSDB_CODE_SUCCESS) { goto _over; } if (param.pSecExpr != NULL) { - if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, &param.pSecExprs, param.pSecExpr, param.pExprs)) != TSDB_CODE_SUCCESS) { + if ((code = createIndirectQueryFuncExprFromMsg(pQueryMsg, pQueryMsg->secondStageOutput, &param.pSecExprs, param.pSecExpr, param.pExprs, param.pUdfInfo)) != TSDB_CODE_SUCCESS) { goto _over; } } @@ -162,18 +162,14 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi assert(pQueryMsg->stableQuery == isSTableQuery); (*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo, - param.pTagColumnInfo, vgId, param.sql, qId); + param.pTagColumnInfo, vgId, param.sql, qId, param.pUdfInfo); param.sql = NULL; param.pExprs = NULL; param.pSecExprs = NULL; param.pGroupbyExpr = NULL; param.pTagColumnInfo = NULL; - - if ((*pQInfo) == NULL) { - code = TSDB_CODE_QRY_OUT_OF_MEMORY; - goto _over; - } + param.pUdfInfo = NULL; code = initQInfo(&pQueryMsg->tsBuf, tsdb, NULL, *pQInfo, &param, (char*)pQueryMsg, pQueryMsg->prevResultLen, NULL); @@ -182,6 +178,8 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi taosArrayDestroy(param.pGroupbyExpr->columnInfo); } + destroyUdfInfo(param.pUdfInfo); + taosArrayDestroy(param.pTableIdList); param.pTableIdList = NULL; @@ -232,6 +230,7 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { // error occurs, record the error code and return to client int32_t ret = setjmp(pQInfo->runtimeEnv.env); if (ret != TSDB_CODE_SUCCESS) { + publishQueryAbortEvent(pQInfo, ret); pQInfo->code = ret; qDebug("QInfo:0x%"PRIx64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code)); return doBuildResCheck(pQInfo); @@ -240,7 +239,9 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { qDebug("QInfo:0x%"PRIx64" query task is launched", pQInfo->qId); bool newgroup = false; + publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_BEFORE_OPERATOR_EXEC); pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot, &newgroup); + publishOperatorProfEvent(pRuntimeEnv->proot, QUERY_PROF_AFTER_OPERATOR_EXEC); pRuntimeEnv->resultInfo.total += GET_NUM_OF_RESULTS(pRuntimeEnv); if (isQueryKilled(pQInfo)) { diff --git a/src/query/src/sql.c b/src/query/src/sql.c index 8764c6d0d3f2c7e31817f48bb0030a09aee2a5c4..7a9183ac060b255c49def69fcea1c5cbd74c3226 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c
@@ -97,28 +97,28 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 271 +#define YYNOCODE 277 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - int yy112; - SCreateAcctInfo yy151; - tSqlExpr* yy166; - SCreateTableSql* yy182; - SSqlNode* yy236; - SRelationInfo* yy244; + TAOS_FIELD yy31; + int yy52; + SLimitVal yy126; + SWindowStateVal yy144; + SCreateTableSql* yy158; + SCreateDbInfo yy214; SSessionWindowVal yy259; - SIntervalVal yy340; - TAOS_FIELD yy343; - SWindowStateVal yy348; - int64_t yy369; - SCreateDbInfo yy382; - SLimitVal yy414; - SArray* yy441; - SCreatedTableInfo yy456; + tSqlExpr* yy370; + SRelationInfo* yy412; + SCreatedTableInfo yy432; + SSqlNode* yy464; + int64_t yy501; tVariant yy506; + SIntervalVal yy520; + SArray* yy525; + SCreateAcctInfo yy547; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -128,17 +128,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 347 -#define YYNRULE 284 -#define YYNTOKEN 190 -#define YY_MAX_SHIFT 346 -#define YY_MIN_SHIFTREDUCE 548 -#define YY_MAX_SHIFTREDUCE 831 -#define YY_ERROR_ACTION 832 -#define YY_ACCEPT_ACTION 833 -#define YY_NO_ACTION 834 -#define YY_MIN_REDUCE 835 -#define YY_MAX_REDUCE 1118 +#define YYNSTATE 362 +#define YYNRULE 289 +#define YYNTOKEN 195 +#define YY_MAX_SHIFT 361 +#define YY_MIN_SHIFTREDUCE 567 +#define YY_MAX_SHIFTREDUCE 855 +#define YY_ERROR_ACTION 856 +#define YY_ACCEPT_ACTION 857 +#define YY_NO_ACTION 858 +#define YY_MIN_REDUCE 859 +#define YY_MAX_REDUCE 1147 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -204,279 +204,288 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (734) +#define YY_ACTTAB_COUNT (753) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 23, 597, 1007, 597, 219, 344, 194, 833, 346, 598, - /* 10 */ 597, 598, 197, 54, 55, 225, 58, 59, 598, 986, - /* 20 */ 239, 48, 1094, 57, 300, 62, 60, 63, 61, 998, - /* 30 */ 998, 231, 233, 53, 52, 986, 986, 51, 50, 49, - /* 40 */ 54, 55, 35, 58, 59, 222, 223, 239, 48, 597, - /* 50 */ 57, 300, 62, 60, 63, 61, 998, 598, 152, 236, - /* 60 */ 53, 52, 235, 152, 51, 50, 49, 55, 1004, 58, - /* 70 */ 59, 772, 261, 239, 48, 240, 57, 300, 62, 60, - /* 80 */ 63, 61, 29, 195, 83, 221, 53, 52, 145, 983, - /* 90 */ 51, 50, 49, 549, 550, 551, 552, 553, 554, 555, - /* 100 */ 556, 557, 558, 559, 560, 561, 345, 773, 770, 220, - /* 110 */ 95, 77, 54, 55, 35, 58, 59, 42, 197, 239, - /* 120 */ 48, 197, 57, 300, 62, 60, 63, 61, 1095, 298, - /* 130 */ 1043, 1095, 53, 52, 197, 89, 51, 50, 49, 54, - /* 140 */ 56, 330, 58, 59, 1095, 974, 239, 48, 629, 57, - /* 150 */ 300, 62, 60, 63, 61, 268, 267, 229, 152, 53, - /* 160 */ 52, 983, 248, 51, 50, 49, 41, 296, 339, 338, - /* 170 */ 295, 294, 293, 337, 292, 336, 335, 334, 291, 333, - /* 180 */ 332, 946, 934, 935, 936, 937, 938, 939, 940, 941, - /* 190 */ 942, 943, 944, 945, 947, 948, 58, 59, 24, 984, - /* 200 */ 239, 48, 35, 57, 300, 62, 60, 63, 61, 51, - /* 210 */ 50, 49, 972, 53, 52, 205, 881, 51, 50, 49, - /* 220 */ 76, 179, 206, 35, 340, 915, 92, 129, 128, 204, - /* 230 */ 1044, 237, 280, 305, 83, 200, 238, 785, 35, 776, - /* 240 */ 774, 779, 777, 980, 780, 230, 238, 785, 715, 983, - /* 250 */ 774, 6, 777, 971, 780, 114, 108, 119, 969, 970, - /* 260 */ 34, 973, 118, 124, 127, 117, 309, 42, 217, 218, - /* 270 */ 983, 121, 301, 41, 9, 339, 338, 35, 217, 218, - /* 280 */ 337, 310, 336, 335, 334, 983, 333, 332, 232, 116, - /* 290 */ 260, 262, 75, 954, 298, 952, 953, 330, 242, 213, - /* 300 */ 955, 36, 957, 958, 956, 718, 959, 960, 62, 60, - /* 310 */ 63, 61, 775, 152, 778, 64, 53, 52, 14, 247, - /* 320 */ 51, 50, 49, 703, 982, 64, 700, 1091, 701, 35, - /* 330 */ 702, 5, 38, 169, 35, 188, 186, 184, 168, 102, - /* 340 */ 97, 101, 183, 132, 131, 130, 35, 786, 94, 91, - /* 350 */ 679, 1090, 783, 782, 244, 245, 35, 786, 35, 35, - /* 360 */ 53, 52, 891, 782, 51, 50, 49, 179, 90, 243, - /* 370 */ 781, 241, 311, 308, 307, 1089, 983, 312, 320, 319, - /* 380 */ 781, 983, 78, 282, 722, 88, 882, 253, 71, 316, - /* 390 */ 249, 179, 246, 983, 315, 314, 257, 256, 80, 317, - /* 400 */ 68, 318, 322, 983, 81, 983, 983, 343, 342, 137, - /* 410 */ 143, 141, 140, 1, 167, 3, 180, 751, 752, 734, - /* 420 */ 742, 302, 743, 1054, 689, 784, 33, 215, 72, 147, - /* 430 */ 65, 264, 26, 704, 36, 285, 691, 264, 287, 690, - /* 440 */ 806, 787, 69, 596, 74, 36, 65, 216, 93, 65, - /* 450 */ 25, 25, 16, 25, 15, 288, 107, 198, 106, 18, - /* 460 */ 707, 17, 708, 199, 705, 20, 706, 19, 201, 113, - /* 470 */ 985, 112, 678, 22, 196, 21, 126, 125, 202, 203, - /* 480 */ 208, 209, 210, 207, 193, 1114, 1106, 1053, 227, 1050, - /* 490 */ 1049, 228, 321, 258, 144, 1006, 1017, 45, 1014, 1015, - /* 500 */ 1019, 999, 265, 1036, 146, 150, 981, 274, 1035, 163, - /* 510 */ 142, 269, 164, 157, 979, 733, 165, 224, 789, 263, - /* 520 */ 166, 153, 894, 283, 290, 43, 191, 271, 39, 299, - /* 530 */ 890, 306, 73, 278, 1113, 996, 70, 47, 104, 154, - /* 540 */ 155, 1112, 281, 1109, 170, 313, 1105, 110, 279, 156, - /* 550 */ 1104, 277, 158, 275, 273, 1101, 159, 171, 270, 912, - /* 560 */ 40, 37, 44, 192, 878, 120, 876, 122, 123, 874, - 
/* 570 */ 873, 250, 182, 871, 870, 869, 868, 867, 866, 185, - /* 580 */ 187, 863, 861, 859, 857, 189, 854, 190, 46, 79, - /* 590 */ 84, 272, 331, 1037, 115, 323, 324, 325, 326, 327, - /* 600 */ 328, 329, 214, 341, 234, 289, 831, 252, 251, 830, - /* 610 */ 211, 212, 254, 98, 99, 255, 829, 812, 811, 259, - /* 620 */ 10, 264, 872, 284, 133, 710, 174, 134, 173, 913, - /* 630 */ 172, 175, 177, 176, 135, 178, 865, 914, 864, 2, - /* 640 */ 136, 950, 856, 855, 82, 30, 4, 266, 160, 161, - /* 650 */ 162, 962, 85, 735, 148, 149, 738, 86, 226, 740, - /* 660 */ 87, 276, 31, 744, 151, 13, 11, 32, 12, 27, - /* 670 */ 28, 286, 96, 94, 642, 638, 636, 635, 634, 631, - /* 680 */ 601, 297, 100, 7, 303, 790, 788, 8, 304, 103, - /* 690 */ 105, 66, 67, 109, 111, 681, 680, 677, 623, 36, - /* 700 */ 621, 613, 619, 615, 617, 611, 609, 645, 644, 643, - /* 710 */ 641, 640, 639, 637, 633, 632, 181, 599, 565, 835, - /* 720 */ 563, 834, 834, 834, 834, 834, 834, 834, 834, 834, - /* 730 */ 834, 834, 138, 139, + /* 0 */ 206, 618, 245, 618, 618, 97, 244, 228, 359, 619, + /* 10 */ 1123, 619, 619, 56, 57, 152, 60, 61, 654, 1027, + /* 20 */ 248, 50, 1036, 59, 317, 64, 62, 65, 63, 984, + /* 30 */ 249, 982, 983, 55, 54, 231, 985, 53, 52, 51, + /* 40 */ 986, 1002, 987, 988, 53, 52, 51, 568, 569, 570, + /* 50 */ 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, + /* 60 */ 581, 360, 206, 257, 229, 159, 206, 56, 57, 37, + /* 70 */ 60, 61, 1124, 174, 248, 50, 1124, 59, 317, 64, + /* 80 */ 62, 65, 63, 277, 276, 29, 79, 55, 54, 1033, + /* 90 */ 206, 53, 52, 51, 56, 57, 315, 60, 61, 234, + /* 100 */ 1124, 248, 50, 1014, 59, 317, 64, 62, 65, 63, + /* 110 */ 358, 357, 144, 230, 55, 54, 85, 1011, 53, 52, + /* 120 */ 51, 56, 58, 240, 60, 61, 347, 1014, 248, 50, + /* 130 */ 94, 59, 317, 64, 62, 65, 63, 794, 1073, 242, + /* 140 */ 289, 55, 54, 1014, 618, 53, 52, 51, 57, 23, + /* 150 */ 60, 61, 619, 44, 248, 50, 1000, 59, 317, 64, + /* 160 */ 62, 65, 63, 997, 998, 34, 1001, 55, 54, 857, + /* 170 */ 361, 53, 52, 51, 43, 313, 354, 353, 312, 311, + /* 180 */ 310, 352, 309, 308, 307, 351, 306, 350, 349, 976, + /* 190 */ 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, + /* 200 */ 974, 975, 977, 978, 60, 61, 24, 1008, 248, 50, + /* 210 */ 257, 59, 317, 64, 62, 65, 63, 1027, 122, 1027, + /* 220 */ 175, 55, 54, 37, 209, 53, 52, 51, 247, 809, + /* 230 */ 347, 215, 798, 232, 801, 270, 804, 135, 134, 214, + /* 240 */ 315, 247, 809, 322, 85, 798, 14, 801, 37, 804, + /* 250 */ 93, 159, 726, 241, 203, 723, 800, 724, 803, 725, + /* 260 */ 226, 227, 257, 16, 318, 15, 37, 238, 5, 40, + /* 270 */ 178, 1011, 1012, 226, 227, 177, 103, 108, 99, 107, + /* 280 */ 96, 44, 204, 253, 254, 210, 64, 62, 65, 63, + /* 290 */ 355, 945, 159, 302, 55, 54, 1010, 251, 53, 52, + /* 300 */ 51, 1013, 78, 269, 256, 77, 120, 114, 125, 66, + /* 310 */ 239, 702, 222, 124, 1011, 130, 133, 123, 37, 197, + /* 320 */ 195, 193, 66, 127, 1072, 37, 192, 139, 138, 137, + /* 330 */ 136, 799, 159, 802, 37, 43, 999, 354, 353, 337, + /* 340 */ 336, 37, 352, 262, 810, 805, 351, 37, 350, 349, + /* 350 */ 37, 806, 266, 265, 742, 55, 54, 810, 805, 53, + /* 360 */ 52, 51, 326, 291, 806, 90, 1011, 727, 728, 327, + /* 370 */ 37, 37, 252, 1011, 250, 807, 325, 324, 328, 258, + /* 380 */ 82, 255, 1011, 332, 331, 329, 150, 148, 147, 1011, + /* 390 */ 907, 333, 83, 917, 334, 1011, 908, 188, 1011, 271, + /* 400 */ 188, 739, 92, 188, 70, 91, 1, 176, 3, 189, + /* 410 */ 775, 776, 758, 38, 335, 339, 80, 273, 1011, 1011, + /* 420 */ 766, 767, 73, 712, 294, 33, 154, 9, 714, 273, + /* 430 */ 296, 713, 796, 830, 
67, 26, 246, 38, 38, 746, + /* 440 */ 811, 319, 67, 76, 95, 67, 71, 25, 1120, 617, + /* 450 */ 808, 132, 131, 113, 25, 112, 1119, 6, 297, 18, + /* 460 */ 1118, 17, 74, 25, 731, 729, 732, 730, 797, 20, + /* 470 */ 1083, 19, 119, 224, 118, 701, 22, 225, 21, 207, + /* 480 */ 208, 211, 205, 212, 213, 217, 218, 219, 216, 202, + /* 490 */ 1143, 1082, 1135, 236, 267, 1079, 1078, 237, 338, 151, + /* 500 */ 1035, 1046, 47, 1065, 1043, 149, 1064, 1025, 1028, 1044, + /* 510 */ 274, 1048, 153, 170, 157, 1009, 278, 283, 171, 1007, + /* 520 */ 172, 233, 166, 280, 161, 757, 160, 173, 162, 922, + /* 530 */ 163, 299, 300, 301, 304, 305, 287, 292, 45, 290, + /* 540 */ 75, 200, 288, 813, 272, 41, 72, 49, 316, 164, + /* 550 */ 916, 323, 1142, 110, 1141, 1138, 286, 179, 330, 1134, + /* 560 */ 284, 116, 1133, 1130, 180, 282, 942, 42, 39, 46, + /* 570 */ 201, 904, 279, 126, 48, 902, 128, 129, 900, 899, + /* 580 */ 259, 191, 897, 896, 895, 894, 893, 892, 891, 194, + /* 590 */ 196, 888, 886, 884, 882, 198, 879, 199, 303, 81, + /* 600 */ 86, 348, 281, 1066, 121, 340, 341, 342, 343, 344, + /* 610 */ 223, 345, 346, 356, 855, 243, 298, 260, 261, 854, + /* 620 */ 263, 220, 221, 264, 853, 836, 104, 921, 920, 105, + /* 630 */ 835, 268, 273, 10, 293, 734, 275, 84, 30, 87, + /* 640 */ 898, 890, 182, 943, 186, 181, 184, 140, 183, 187, + /* 650 */ 185, 141, 142, 889, 4, 143, 980, 881, 880, 944, + /* 660 */ 759, 165, 167, 168, 155, 169, 762, 156, 2, 990, + /* 670 */ 88, 235, 764, 89, 285, 31, 768, 158, 11, 12, + /* 680 */ 13, 32, 27, 295, 28, 96, 98, 101, 35, 100, + /* 690 */ 632, 36, 102, 667, 665, 664, 663, 661, 660, 659, + /* 700 */ 656, 314, 622, 106, 7, 320, 812, 814, 8, 321, + /* 710 */ 109, 111, 68, 69, 115, 704, 703, 38, 117, 700, + /* 720 */ 648, 646, 638, 644, 640, 642, 636, 634, 670, 669, + /* 730 */ 668, 666, 662, 658, 657, 190, 620, 585, 583, 859, + /* 740 */ 858, 858, 858, 858, 858, 858, 858, 858, 858, 858, + /* 750 */ 858, 145, 146, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 259, 1, 194, 1, 193, 194, 259, 191, 192, 9, - /* 10 */ 1, 9, 259, 13, 14, 238, 16, 17, 9, 242, - /* 20 */ 20, 21, 269, 23, 24, 25, 26, 27, 28, 240, - /* 30 */ 240, 238, 238, 33, 34, 242, 242, 37, 38, 39, - /* 40 */ 13, 14, 194, 16, 17, 256, 256, 20, 21, 1, - /* 50 */ 23, 24, 25, 26, 27, 28, 240, 9, 194, 200, - /* 60 */ 33, 34, 200, 194, 37, 38, 39, 14, 260, 16, - /* 70 */ 17, 1, 256, 20, 21, 200, 23, 24, 25, 26, - /* 80 */ 27, 28, 80, 259, 80, 237, 33, 34, 194, 241, - /* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51, - /* 100 */ 52, 53, 54, 55, 56, 57, 58, 37, 81, 61, - /* 110 */ 201, 111, 13, 14, 194, 16, 17, 113, 259, 20, - /* 120 */ 21, 259, 23, 24, 25, 26, 27, 28, 269, 82, - /* 130 */ 266, 269, 33, 34, 259, 266, 37, 38, 39, 13, - /* 140 */ 14, 84, 16, 17, 269, 236, 20, 21, 5, 23, - /* 150 */ 24, 25, 26, 27, 28, 261, 262, 237, 194, 33, - /* 160 */ 34, 241, 194, 37, 38, 39, 92, 93, 94, 95, - /* 170 */ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, - /* 180 */ 106, 215, 216, 217, 218, 219, 220, 221, 222, 223, - /* 190 */ 224, 225, 226, 227, 228, 229, 16, 17, 44, 231, - /* 200 */ 20, 21, 194, 23, 24, 25, 26, 27, 28, 37, - /* 210 */ 38, 39, 0, 33, 34, 61, 199, 37, 38, 39, - /* 220 */ 201, 204, 68, 194, 213, 214, 201, 73, 74, 75, - /* 230 */ 266, 60, 268, 79, 80, 259, 1, 2, 194, 5, - /* 240 */ 5, 7, 7, 194, 9, 237, 1, 2, 91, 241, - /* 250 */ 5, 80, 7, 234, 9, 62, 63, 64, 233, 234, - /* 260 */ 235, 236, 69, 70, 71, 72, 237, 113, 33, 34, - /* 270 */ 241, 78, 37, 92, 117, 94, 95, 194, 33, 34, - /* 280 */ 99, 237, 101, 102, 103, 241, 
105, 106, 239, 76, - /* 290 */ 136, 81, 138, 215, 82, 217, 218, 84, 68, 145, - /* 300 */ 222, 91, 224, 225, 226, 37, 228, 229, 25, 26, - /* 310 */ 27, 28, 5, 194, 7, 80, 33, 34, 80, 68, - /* 320 */ 37, 38, 39, 2, 241, 80, 5, 259, 7, 194, - /* 330 */ 9, 62, 63, 64, 194, 62, 63, 64, 69, 70, - /* 340 */ 71, 72, 69, 70, 71, 72, 194, 112, 110, 111, - /* 350 */ 5, 259, 118, 118, 33, 34, 194, 112, 194, 194, - /* 360 */ 33, 34, 199, 118, 37, 38, 39, 204, 243, 139, - /* 370 */ 135, 141, 237, 143, 144, 259, 241, 237, 33, 34, - /* 380 */ 135, 241, 257, 264, 116, 266, 199, 137, 91, 237, - /* 390 */ 139, 204, 141, 241, 143, 144, 146, 147, 81, 237, - /* 400 */ 91, 237, 237, 241, 81, 241, 241, 65, 66, 67, - /* 410 */ 62, 63, 64, 202, 203, 197, 198, 126, 127, 81, - /* 420 */ 81, 15, 81, 232, 81, 118, 80, 259, 131, 91, - /* 430 */ 91, 114, 91, 112, 91, 81, 81, 114, 81, 81, - /* 440 */ 81, 81, 133, 81, 80, 91, 91, 259, 91, 91, - /* 450 */ 91, 91, 140, 91, 142, 109, 140, 259, 142, 140, - /* 460 */ 5, 142, 7, 259, 5, 140, 7, 142, 259, 140, - /* 470 */ 242, 142, 108, 140, 259, 142, 76, 77, 259, 259, - /* 480 */ 259, 259, 259, 259, 259, 242, 242, 232, 232, 232, - /* 490 */ 232, 232, 232, 194, 194, 194, 194, 258, 194, 194, - /* 500 */ 194, 240, 240, 267, 194, 194, 240, 194, 267, 244, - /* 510 */ 60, 263, 194, 250, 194, 118, 194, 263, 112, 195, - /* 520 */ 194, 254, 194, 124, 194, 194, 194, 263, 194, 194, - /* 530 */ 194, 194, 130, 263, 194, 255, 132, 129, 194, 253, - /* 540 */ 252, 194, 128, 194, 194, 194, 194, 194, 123, 251, - /* 550 */ 194, 122, 249, 121, 120, 194, 248, 194, 119, 194, - /* 560 */ 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, - /* 570 */ 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, - /* 580 */ 194, 194, 194, 194, 194, 194, 194, 194, 134, 195, - /* 590 */ 195, 195, 107, 195, 90, 89, 50, 86, 88, 54, - /* 600 */ 87, 85, 195, 82, 195, 195, 5, 5, 148, 5, - /* 610 */ 195, 195, 148, 201, 201, 5, 5, 94, 93, 137, - /* 620 */ 80, 114, 195, 109, 196, 81, 206, 196, 210, 212, - /* 630 */ 211, 209, 208, 207, 196, 205, 195, 214, 195, 202, - /* 640 */ 196, 230, 195, 195, 115, 80, 197, 91, 247, 246, - /* 650 */ 245, 230, 91, 81, 80, 91, 81, 80, 1, 81, - /* 660 */ 80, 80, 91, 81, 80, 80, 125, 91, 125, 80, - /* 670 */ 80, 109, 76, 110, 9, 5, 5, 5, 5, 5, - /* 680 */ 83, 15, 76, 80, 24, 112, 81, 80, 58, 142, - /* 690 */ 142, 16, 16, 142, 142, 5, 5, 81, 5, 91, - /* 700 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 710 */ 5, 5, 5, 5, 5, 5, 91, 83, 60, 0, - /* 720 */ 59, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 730 */ 270, 270, 21, 21, 270, 270, 270, 270, 270, 270, - /* 740 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 750 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 760 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 770 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 780 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 790 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 800 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 810 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 820 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 830 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 840 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 850 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 860 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 870 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 880 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 890 */ 270, 270, 270, 270, 
270, 270, 270, 270, 270, 270, - /* 900 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 910 */ 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, - /* 920 */ 270, 270, 270, 270, + /* 0 */ 265, 1, 205, 1, 1, 206, 205, 198, 199, 9, + /* 10 */ 275, 9, 9, 13, 14, 199, 16, 17, 5, 246, + /* 20 */ 20, 21, 199, 23, 24, 25, 26, 27, 28, 222, + /* 30 */ 205, 224, 225, 33, 34, 262, 229, 37, 38, 39, + /* 40 */ 233, 242, 235, 236, 37, 38, 39, 45, 46, 47, + /* 50 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + /* 60 */ 58, 59, 265, 199, 62, 199, 265, 13, 14, 199, + /* 70 */ 16, 17, 275, 209, 20, 21, 275, 23, 24, 25, + /* 80 */ 26, 27, 28, 267, 268, 82, 86, 33, 34, 266, + /* 90 */ 265, 37, 38, 39, 13, 14, 84, 16, 17, 244, + /* 100 */ 275, 20, 21, 248, 23, 24, 25, 26, 27, 28, + /* 110 */ 66, 67, 68, 243, 33, 34, 82, 247, 37, 38, + /* 120 */ 39, 13, 14, 244, 16, 17, 90, 248, 20, 21, + /* 130 */ 206, 23, 24, 25, 26, 27, 28, 83, 272, 244, + /* 140 */ 274, 33, 34, 248, 1, 37, 38, 39, 14, 265, + /* 150 */ 16, 17, 9, 119, 20, 21, 0, 23, 24, 25, + /* 160 */ 26, 27, 28, 239, 240, 241, 242, 33, 34, 196, + /* 170 */ 197, 37, 38, 39, 98, 99, 100, 101, 102, 103, + /* 180 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 222, + /* 190 */ 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, + /* 200 */ 233, 234, 235, 236, 16, 17, 44, 199, 20, 21, + /* 210 */ 199, 23, 24, 25, 26, 27, 28, 246, 78, 246, + /* 220 */ 209, 33, 34, 199, 62, 37, 38, 39, 1, 2, + /* 230 */ 90, 69, 5, 262, 7, 262, 9, 75, 76, 77, + /* 240 */ 84, 1, 2, 81, 82, 5, 82, 7, 199, 9, + /* 250 */ 86, 199, 2, 245, 265, 5, 5, 7, 7, 9, + /* 260 */ 33, 34, 199, 145, 37, 147, 199, 243, 63, 64, + /* 270 */ 65, 247, 209, 33, 34, 70, 71, 72, 73, 74, + /* 280 */ 116, 119, 265, 33, 34, 265, 25, 26, 27, 28, + /* 290 */ 220, 221, 199, 88, 33, 34, 247, 69, 37, 38, + /* 300 */ 39, 248, 206, 141, 69, 143, 63, 64, 65, 82, + /* 310 */ 243, 5, 150, 70, 247, 72, 73, 74, 199, 63, + /* 320 */ 64, 65, 82, 80, 272, 199, 70, 71, 72, 73, + /* 330 */ 74, 5, 199, 7, 199, 98, 240, 100, 101, 33, + /* 340 */ 34, 199, 105, 142, 117, 118, 109, 199, 111, 112, + /* 350 */ 199, 124, 151, 152, 37, 33, 34, 117, 118, 37, + /* 360 */ 38, 39, 243, 270, 124, 272, 247, 117, 118, 243, + /* 370 */ 199, 199, 144, 247, 146, 124, 148, 149, 243, 144, + /* 380 */ 83, 146, 247, 148, 149, 243, 63, 64, 65, 247, + /* 390 */ 204, 243, 83, 204, 243, 247, 204, 211, 247, 83, + /* 400 */ 211, 97, 249, 211, 97, 272, 207, 208, 202, 203, + /* 410 */ 132, 133, 83, 97, 243, 243, 263, 120, 247, 247, + /* 420 */ 83, 83, 97, 83, 83, 82, 97, 123, 83, 120, + /* 430 */ 83, 83, 1, 83, 97, 97, 61, 97, 97, 122, + /* 440 */ 83, 15, 97, 82, 97, 97, 139, 97, 265, 83, + /* 450 */ 124, 78, 79, 145, 97, 147, 265, 82, 115, 145, + /* 460 */ 265, 147, 137, 97, 5, 5, 7, 7, 37, 145, + /* 470 */ 238, 147, 145, 265, 147, 114, 145, 265, 147, 265, + /* 480 */ 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, + /* 490 */ 248, 238, 248, 238, 199, 238, 238, 238, 238, 199, + /* 500 */ 199, 199, 264, 273, 199, 61, 273, 261, 246, 199, + /* 510 */ 246, 199, 199, 250, 199, 246, 269, 199, 199, 199, + /* 520 */ 199, 269, 254, 269, 259, 124, 260, 199, 258, 199, + /* 530 */ 257, 199, 199, 199, 199, 199, 269, 130, 199, 134, + /* 540 */ 136, 199, 129, 117, 200, 199, 138, 135, 199, 256, + /* 550 */ 199, 199, 199, 199, 199, 199, 128, 199, 199, 199, + /* 560 */ 127, 199, 199, 199, 199, 126, 199, 199, 199, 199, + /* 570 */ 199, 199, 125, 199, 140, 199, 199, 199, 199, 199, + /* 580 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, + /* 590 */ 199, 199, 199, 
199, 199, 199, 199, 199, 89, 200, + /* 600 */ 200, 113, 200, 200, 96, 95, 51, 92, 94, 55, + /* 610 */ 200, 93, 91, 84, 5, 200, 200, 153, 5, 5, + /* 620 */ 153, 200, 200, 5, 5, 100, 206, 210, 210, 206, + /* 630 */ 99, 142, 120, 82, 115, 83, 97, 121, 82, 97, + /* 640 */ 200, 200, 217, 219, 215, 218, 216, 201, 213, 212, + /* 650 */ 214, 201, 201, 200, 202, 201, 237, 200, 200, 221, + /* 660 */ 83, 255, 253, 252, 82, 251, 83, 97, 207, 237, + /* 670 */ 82, 1, 83, 82, 82, 97, 83, 82, 131, 131, + /* 680 */ 82, 97, 82, 115, 82, 116, 78, 71, 87, 86, + /* 690 */ 5, 87, 86, 9, 5, 5, 5, 5, 5, 5, + /* 700 */ 5, 15, 85, 78, 82, 24, 83, 117, 82, 59, + /* 710 */ 147, 147, 16, 16, 147, 5, 5, 97, 147, 83, + /* 720 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 730 */ 5, 5, 5, 5, 5, 97, 85, 61, 60, 0, + /* 740 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 750 */ 276, 21, 21, 276, 276, 276, 276, 276, 276, 276, + /* 760 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 770 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 780 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 790 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 800 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 810 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 820 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 830 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 840 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 850 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 860 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 870 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 880 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 890 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 900 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 910 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 920 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 930 */ 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, + /* 940 */ 276, 276, 276, 276, 276, 276, 276, 276, }; -#define YY_SHIFT_COUNT (346) +#define YY_SHIFT_COUNT (361) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (719) +#define YY_SHIFT_MAX (739) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 154, 74, 74, 181, 181, 47, 235, 245, 245, 2, - /* 10 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - /* 20 */ 9, 9, 9, 0, 48, 245, 321, 321, 321, 4, - /* 30 */ 4, 9, 9, 9, 212, 9, 9, 213, 47, 57, - /* 40 */ 57, 143, 734, 734, 734, 245, 245, 245, 245, 245, - /* 50 */ 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, - /* 60 */ 245, 245, 245, 245, 245, 321, 321, 321, 345, 345, - /* 70 */ 345, 345, 345, 345, 345, 9, 9, 9, 268, 9, - /* 80 */ 9, 9, 4, 4, 9, 9, 9, 9, 291, 291, - /* 90 */ 157, 4, 9, 9, 9, 9, 9, 9, 9, 9, - /* 100 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - /* 110 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - /* 120 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - /* 130 */ 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, - /* 140 */ 9, 9, 9, 9, 450, 450, 450, 397, 397, 397, - /* 150 */ 450, 397, 450, 402, 404, 408, 399, 414, 425, 429, - /* 160 */ 432, 434, 439, 454, 450, 450, 450, 485, 47, 47, - /* 170 */ 450, 450, 504, 506, 546, 511, 510, 545, 513, 516, - /* 180 */ 485, 143, 450, 521, 521, 450, 521, 450, 521, 450, - /* 190 */ 450, 734, 734, 27, 99, 99, 126, 99, 53, 180, - /* 200 */ 283, 283, 283, 283, 193, 269, 273, 327, 327, 327, - /* 210 */ 327, 230, 251, 250, 238, 172, 172, 234, 307, 342, - /* 220 */ 348, 210, 317, 323, 338, 339, 341, 309, 297, 343, - /* 230 */ 354, 355, 357, 358, 346, 359, 360, 70, 171, 
406, - /* 240 */ 362, 312, 316, 319, 455, 459, 325, 329, 364, 333, - /* 250 */ 400, 601, 460, 602, 604, 464, 610, 611, 523, 525, - /* 260 */ 482, 507, 514, 540, 529, 544, 565, 556, 561, 572, - /* 270 */ 574, 575, 564, 577, 578, 580, 657, 581, 582, 584, - /* 280 */ 571, 541, 576, 543, 585, 514, 589, 562, 590, 563, - /* 290 */ 596, 665, 670, 671, 672, 673, 674, 597, 666, 606, - /* 300 */ 603, 605, 573, 607, 660, 630, 675, 547, 548, 608, - /* 310 */ 608, 608, 608, 676, 551, 552, 608, 608, 608, 690, - /* 320 */ 691, 616, 608, 693, 695, 696, 697, 698, 699, 700, - /* 330 */ 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, - /* 340 */ 625, 634, 711, 712, 658, 661, 719, + /* 0 */ 162, 76, 76, 237, 237, 12, 227, 240, 240, 3, + /* 10 */ 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, + /* 20 */ 143, 143, 143, 0, 2, 240, 250, 250, 250, 34, + /* 30 */ 34, 143, 143, 143, 156, 143, 143, 143, 143, 140, + /* 40 */ 12, 36, 36, 13, 753, 753, 753, 240, 240, 240, + /* 50 */ 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, + /* 60 */ 240, 240, 240, 240, 240, 240, 240, 250, 250, 250, + /* 70 */ 306, 306, 306, 306, 306, 306, 306, 143, 143, 143, + /* 80 */ 317, 143, 143, 143, 34, 34, 143, 143, 143, 143, + /* 90 */ 278, 278, 304, 34, 143, 143, 143, 143, 143, 143, + /* 100 */ 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, + /* 110 */ 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, + /* 120 */ 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, + /* 130 */ 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, + /* 140 */ 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, + /* 150 */ 143, 444, 444, 444, 401, 401, 401, 444, 401, 444, + /* 160 */ 404, 408, 407, 412, 405, 413, 428, 433, 439, 447, + /* 170 */ 434, 444, 444, 444, 509, 509, 488, 12, 12, 444, + /* 180 */ 444, 508, 510, 555, 515, 514, 554, 518, 521, 488, + /* 190 */ 13, 444, 529, 529, 444, 529, 444, 529, 444, 444, + /* 200 */ 753, 753, 54, 81, 81, 108, 81, 134, 188, 205, + /* 210 */ 261, 261, 261, 261, 243, 256, 322, 322, 322, 322, + /* 220 */ 228, 235, 201, 164, 7, 7, 251, 326, 44, 323, + /* 230 */ 316, 297, 309, 329, 337, 338, 307, 325, 340, 341, + /* 240 */ 345, 347, 348, 343, 350, 357, 431, 375, 426, 366, + /* 250 */ 118, 308, 314, 459, 460, 324, 327, 361, 331, 373, + /* 260 */ 609, 464, 613, 614, 467, 618, 619, 525, 531, 489, + /* 270 */ 512, 519, 551, 516, 552, 556, 539, 542, 577, 582, + /* 280 */ 583, 570, 588, 589, 591, 670, 592, 593, 595, 578, + /* 290 */ 547, 584, 548, 598, 519, 600, 568, 602, 569, 608, + /* 300 */ 601, 603, 616, 685, 604, 606, 684, 689, 690, 691, + /* 310 */ 692, 693, 694, 695, 617, 686, 625, 622, 623, 590, + /* 320 */ 626, 681, 650, 696, 563, 564, 620, 620, 620, 620, + /* 330 */ 697, 567, 571, 620, 620, 620, 710, 711, 636, 620, + /* 340 */ 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, + /* 350 */ 725, 726, 727, 728, 729, 638, 651, 730, 731, 676, + /* 360 */ 678, 739, }; -#define YY_REDUCE_COUNT (192) -#define YY_REDUCE_MIN (-259) -#define YY_REDUCE_MAX (449) +#define YY_REDUCE_COUNT (201) +#define YY_REDUCE_MIN (-265) +#define YY_REDUCE_MAX (461) static const short yy_reduce_ofst[] = { - /* 0 */ -184, -34, -34, 78, 78, 25, -141, -138, -125, -106, - /* 10 */ -152, -36, 119, -80, 8, 29, 44, 135, 140, 152, - /* 20 */ 162, 164, 165, -192, -189, -247, -223, -207, -206, -211, - /* 30 */ -210, -136, -131, 49, -91, -32, 83, 17, 19, 163, - /* 40 */ 187, 11, 125, 211, 218, -259, -253, -176, -24, 68, - /* 50 */ 92, 116, 168, 188, 198, 204, 209, 215, 219, 220, - /* 60 */ 221, 222, 223, 224, 225, 228, 243, 244, 191, 255, - /* 70 */ 256, 
257, 258, 259, 260, 299, 300, 301, 239, 302, - /* 80 */ 304, 305, 261, 262, 306, 310, 311, 313, 236, 241, - /* 90 */ 265, 266, 318, 320, 322, 326, 328, 330, 331, 332, - /* 100 */ 334, 335, 336, 337, 340, 344, 347, 349, 350, 351, - /* 110 */ 352, 353, 356, 361, 363, 365, 366, 367, 368, 369, - /* 120 */ 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, - /* 130 */ 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, - /* 140 */ 390, 391, 392, 393, 324, 394, 395, 248, 254, 264, - /* 150 */ 396, 270, 398, 280, 267, 286, 288, 298, 263, 303, - /* 160 */ 308, 401, 403, 405, 407, 409, 410, 411, 412, 413, - /* 170 */ 415, 416, 417, 419, 418, 420, 422, 426, 424, 430, - /* 180 */ 421, 423, 427, 428, 431, 441, 438, 443, 444, 447, - /* 190 */ 448, 437, 449, + /* 0 */ -27, -33, -33, -193, -193, -76, -203, -199, -175, -184, + /* 10 */ -130, -134, 93, 24, 67, 119, 126, 135, 142, 148, + /* 20 */ 151, 171, 172, -177, -191, -265, -145, -121, -105, -227, + /* 30 */ -29, 52, 133, 8, -201, -136, 11, 63, 49, 186, + /* 40 */ 96, 189, 192, 70, 153, 199, 206, -116, -11, 17, + /* 50 */ 20, 183, 191, 195, 208, 212, 214, 215, 216, 217, + /* 60 */ 218, 219, 220, 221, 222, 223, 224, 53, 242, 244, + /* 70 */ 232, 253, 255, 257, 258, 259, 260, 295, 300, 301, + /* 80 */ 238, 302, 305, 310, 262, 264, 312, 313, 315, 318, + /* 90 */ 230, 233, 263, 269, 319, 320, 321, 328, 330, 332, + /* 100 */ 333, 334, 335, 336, 339, 342, 346, 349, 351, 352, + /* 110 */ 353, 354, 355, 356, 358, 359, 360, 362, 363, 364, + /* 120 */ 365, 367, 368, 369, 370, 371, 372, 374, 376, 377, + /* 130 */ 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, + /* 140 */ 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, + /* 150 */ 398, 344, 399, 400, 247, 252, 254, 402, 267, 403, + /* 160 */ 246, 266, 265, 270, 273, 293, 406, 268, 409, 411, + /* 170 */ 414, 410, 415, 416, 417, 418, 419, 420, 423, 421, + /* 180 */ 422, 424, 427, 425, 435, 430, 436, 429, 437, 432, + /* 190 */ 438, 440, 446, 450, 441, 451, 453, 454, 457, 458, + /* 200 */ 461, 452, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 832, 949, 892, 961, 879, 889, 1097, 1097, 1097, 832, - /* 10 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 20 */ 832, 832, 832, 1008, 851, 1097, 832, 832, 832, 832, - /* 30 */ 832, 832, 832, 832, 889, 832, 832, 895, 889, 895, - /* 40 */ 895, 832, 1003, 933, 951, 832, 832, 832, 832, 832, - /* 50 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 60 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 70 */ 832, 832, 832, 832, 832, 832, 832, 832, 1010, 1016, - /* 80 */ 1013, 832, 832, 832, 1018, 832, 832, 832, 1040, 1040, - /* 90 */ 1001, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 100 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 110 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 120 */ 877, 832, 875, 832, 832, 832, 832, 832, 832, 832, - /* 130 */ 832, 832, 832, 832, 832, 832, 832, 862, 832, 832, - /* 140 */ 832, 832, 832, 832, 853, 853, 853, 832, 832, 832, - /* 150 */ 853, 832, 853, 1047, 1051, 1045, 1033, 1041, 1032, 1028, - /* 160 */ 1026, 1024, 1023, 1055, 853, 853, 853, 893, 889, 889, - /* 170 */ 853, 853, 911, 909, 907, 899, 905, 901, 903, 897, - /* 180 */ 880, 832, 853, 887, 887, 853, 887, 853, 887, 853, - /* 190 */ 853, 933, 951, 832, 1056, 1046, 832, 1096, 1086, 1085, - /* 200 */ 1092, 1084, 1083, 1082, 832, 832, 832, 1078, 1081, 1080, - /* 210 */ 1079, 832, 832, 832, 832, 1088, 1087, 832, 832, 832, - /* 220 */ 832, 832, 832, 832, 832, 832, 832, 1052, 1048, 832, - /* 230 */ 832, 832, 832, 832, 832, 832, 
832, 832, 1058, 832, - /* 240 */ 832, 832, 832, 832, 832, 832, 832, 832, 963, 832, - /* 250 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 260 */ 832, 1000, 832, 832, 832, 832, 832, 1012, 1011, 832, - /* 270 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 280 */ 1042, 832, 1034, 832, 832, 975, 832, 832, 832, 832, - /* 290 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 300 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 1115, - /* 310 */ 1110, 1111, 1108, 832, 832, 832, 1107, 1102, 1103, 832, - /* 320 */ 832, 832, 1100, 832, 832, 832, 832, 832, 832, 832, - /* 330 */ 832, 832, 832, 832, 832, 832, 832, 832, 832, 832, - /* 340 */ 917, 832, 860, 858, 832, 849, 832, + /* 0 */ 856, 979, 918, 989, 905, 915, 1126, 1126, 1126, 856, + /* 10 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 20 */ 856, 856, 856, 1037, 876, 1126, 856, 856, 856, 856, + /* 30 */ 856, 856, 856, 856, 915, 856, 856, 856, 856, 925, + /* 40 */ 915, 925, 925, 856, 1032, 963, 981, 856, 856, 856, + /* 50 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 60 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 70 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 80 */ 1039, 1045, 1042, 856, 856, 856, 1047, 856, 856, 856, + /* 90 */ 1069, 1069, 1030, 856, 856, 856, 856, 856, 856, 856, + /* 100 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 110 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 120 */ 856, 856, 856, 856, 856, 856, 903, 856, 901, 856, + /* 130 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 140 */ 856, 856, 856, 856, 887, 856, 856, 856, 856, 856, + /* 150 */ 856, 878, 878, 878, 856, 856, 856, 878, 856, 878, + /* 160 */ 1076, 1080, 1062, 1074, 1070, 1061, 1057, 1055, 1053, 1052, + /* 170 */ 1084, 878, 878, 878, 923, 923, 919, 915, 915, 878, + /* 180 */ 878, 941, 939, 937, 929, 935, 931, 933, 927, 906, + /* 190 */ 856, 878, 913, 913, 878, 913, 878, 913, 878, 878, + /* 200 */ 963, 981, 856, 1085, 1075, 856, 1125, 1115, 1114, 856, + /* 210 */ 1121, 1113, 1112, 1111, 856, 856, 1107, 1110, 1109, 1108, + /* 220 */ 856, 856, 856, 856, 1117, 1116, 856, 856, 856, 856, + /* 230 */ 856, 856, 856, 856, 856, 856, 1081, 1077, 856, 856, + /* 240 */ 856, 856, 856, 856, 856, 856, 856, 1087, 856, 856, + /* 250 */ 856, 856, 856, 856, 856, 856, 856, 991, 856, 856, + /* 260 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 270 */ 1029, 856, 856, 856, 856, 856, 1041, 1040, 856, 856, + /* 280 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 1071, + /* 290 */ 856, 1063, 856, 856, 1003, 856, 856, 856, 856, 856, + /* 300 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 310 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 320 */ 856, 856, 856, 856, 856, 856, 1144, 1139, 1140, 1137, + /* 330 */ 856, 856, 856, 1136, 1131, 1132, 856, 856, 856, 1129, + /* 340 */ 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, + /* 350 */ 856, 856, 856, 856, 856, 947, 856, 885, 883, 856, + /* 360 */ 874, 856, }; /********** End of lemon-generated parsing tables *****************************/ @@ -543,6 +552,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* SHOW => nothing */ 0, /* DATABASES => nothing */ 0, /* TOPICS => nothing */ + 0, /* FUNCTIONS => nothing */ 0, /* MNODES => nothing */ 0, /* DNODES => nothing */ 0, /* ACCOUNTS => nothing */ @@ -566,6 +576,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* VGROUPS => nothing */ 0, /* DROP => nothing */ 0, /* TOPIC => nothing */ + 0, /* FUNCTION => nothing */ 0, /* DNODE => nothing */ 0, /* USER => 
nothing */ 0, /* ACCOUNT => nothing */ @@ -580,6 +591,10 @@ static const YYCODETYPE yyFallback[] = { 0, /* RP => nothing */ 0, /* IF => nothing */ 0, /* EXISTS => nothing */ + 0, /* AS => nothing */ + 0, /* OUTPUTTYPE => nothing */ + 0, /* AGGREGATE => nothing */ + 0, /* BUFSIZE => nothing */ 0, /* PPS => nothing */ 0, /* TSERIES => nothing */ 0, /* DBS => nothing */ @@ -607,8 +622,8 @@ static const YYCODETYPE yyFallback[] = { 0, /* UNSIGNED => nothing */ 0, /* TAGS => nothing */ 0, /* USING => nothing */ - 0, /* AS => nothing */ 1, /* NULL => ID */ + 1, /* NOW => ID */ 0, /* SELECT => nothing */ 0, /* UNION => nothing */ 1, /* ALL => ID */ @@ -631,7 +646,6 @@ static const YYCODETYPE yyFallback[] = { 0, /* SLIMIT => nothing */ 0, /* SOFFSET => nothing */ 0, /* WHERE => nothing */ - 1, /* NOW => ID */ 0, /* RESET => nothing */ 0, /* QUERY => nothing */ 0, /* SYNCDB => nothing */ @@ -819,229 +833,235 @@ static const char *const yyTokenName[] = { /* 44 */ "SHOW", /* 45 */ "DATABASES", /* 46 */ "TOPICS", - /* 47 */ "MNODES", - /* 48 */ "DNODES", - /* 49 */ "ACCOUNTS", - /* 50 */ "USERS", - /* 51 */ "MODULES", - /* 52 */ "QUERIES", - /* 53 */ "CONNECTIONS", - /* 54 */ "STREAMS", - /* 55 */ "VARIABLES", - /* 56 */ "SCORES", - /* 57 */ "GRANTS", - /* 58 */ "VNODES", - /* 59 */ "IPTOKEN", - /* 60 */ "DOT", - /* 61 */ "CREATE", - /* 62 */ "TABLE", - /* 63 */ "STABLE", - /* 64 */ "DATABASE", - /* 65 */ "TABLES", - /* 66 */ "STABLES", - /* 67 */ "VGROUPS", - /* 68 */ "DROP", - /* 69 */ "TOPIC", - /* 70 */ "DNODE", - /* 71 */ "USER", - /* 72 */ "ACCOUNT", - /* 73 */ "USE", - /* 74 */ "DESCRIBE", - /* 75 */ "ALTER", - /* 76 */ "PASS", - /* 77 */ "PRIVILEGE", - /* 78 */ "LOCAL", - /* 79 */ "COMPACT", - /* 80 */ "LP", - /* 81 */ "RP", - /* 82 */ "IF", - /* 83 */ "EXISTS", - /* 84 */ "PPS", - /* 85 */ "TSERIES", - /* 86 */ "DBS", - /* 87 */ "STORAGE", - /* 88 */ "QTIME", - /* 89 */ "CONNS", - /* 90 */ "STATE", - /* 91 */ "COMMA", - /* 92 */ "KEEP", - /* 93 */ "CACHE", - /* 94 */ "REPLICA", - /* 95 */ "QUORUM", - /* 96 */ "DAYS", - /* 97 */ "MINROWS", - /* 98 */ "MAXROWS", - /* 99 */ "BLOCKS", - /* 100 */ "CTIME", - /* 101 */ "WAL", - /* 102 */ "FSYNC", - /* 103 */ "COMP", - /* 104 */ "PRECISION", - /* 105 */ "UPDATE", - /* 106 */ "CACHELAST", - /* 107 */ "PARTITIONS", - /* 108 */ "UNSIGNED", - /* 109 */ "TAGS", - /* 110 */ "USING", - /* 111 */ "AS", - /* 112 */ "NULL", - /* 113 */ "SELECT", - /* 114 */ "UNION", - /* 115 */ "ALL", - /* 116 */ "DISTINCT", - /* 117 */ "FROM", - /* 118 */ "VARIABLE", - /* 119 */ "INTERVAL", - /* 120 */ "SESSION", - /* 121 */ "STATE_WINDOW", - /* 122 */ "FILL", - /* 123 */ "SLIDING", - /* 124 */ "ORDER", - /* 125 */ "BY", - /* 126 */ "ASC", - /* 127 */ "DESC", - /* 128 */ "GROUP", - /* 129 */ "HAVING", - /* 130 */ "LIMIT", - /* 131 */ "OFFSET", - /* 132 */ "SLIMIT", - /* 133 */ "SOFFSET", - /* 134 */ "WHERE", - /* 135 */ "NOW", - /* 136 */ "RESET", - /* 137 */ "QUERY", - /* 138 */ "SYNCDB", - /* 139 */ "ADD", - /* 140 */ "COLUMN", - /* 141 */ "MODIFY", - /* 142 */ "TAG", - /* 143 */ "CHANGE", - /* 144 */ "SET", - /* 145 */ "KILL", - /* 146 */ "CONNECTION", - /* 147 */ "STREAM", - /* 148 */ "COLON", - /* 149 */ "ABORT", - /* 150 */ "AFTER", - /* 151 */ "ATTACH", - /* 152 */ "BEFORE", - /* 153 */ "BEGIN", - /* 154 */ "CASCADE", - /* 155 */ "CLUSTER", - /* 156 */ "CONFLICT", - /* 157 */ "COPY", - /* 158 */ "DEFERRED", - /* 159 */ "DELIMITERS", - /* 160 */ "DETACH", - /* 161 */ "EACH", - /* 162 */ "END", - /* 163 */ "EXPLAIN", - /* 164 */ "FAIL", - /* 165 */ "FOR", - /* 166 */ 
"IGNORE", - /* 167 */ "IMMEDIATE", - /* 168 */ "INITIALLY", - /* 169 */ "INSTEAD", - /* 170 */ "MATCH", - /* 171 */ "KEY", - /* 172 */ "OF", - /* 173 */ "RAISE", - /* 174 */ "REPLACE", - /* 175 */ "RESTRICT", - /* 176 */ "ROW", - /* 177 */ "STATEMENT", - /* 178 */ "TRIGGER", - /* 179 */ "VIEW", - /* 180 */ "SEMI", - /* 181 */ "NONE", - /* 182 */ "PREV", - /* 183 */ "LINEAR", - /* 184 */ "IMPORT", - /* 185 */ "TBNAME", - /* 186 */ "JOIN", - /* 187 */ "INSERT", - /* 188 */ "INTO", - /* 189 */ "VALUES", - /* 190 */ "error", - /* 191 */ "program", - /* 192 */ "cmd", - /* 193 */ "dbPrefix", - /* 194 */ "ids", - /* 195 */ "cpxName", - /* 196 */ "ifexists", - /* 197 */ "alter_db_optr", - /* 198 */ "alter_topic_optr", - /* 199 */ "acct_optr", - /* 200 */ "exprlist", - /* 201 */ "ifnotexists", - /* 202 */ "db_optr", - /* 203 */ "topic_optr", - /* 204 */ "pps", - /* 205 */ "tseries", - /* 206 */ "dbs", - /* 207 */ "streams", - /* 208 */ "storage", - /* 209 */ "qtime", - /* 210 */ "users", - /* 211 */ "conns", - /* 212 */ "state", - /* 213 */ "intitemlist", - /* 214 */ "intitem", - /* 215 */ "keep", - /* 216 */ "cache", - /* 217 */ "replica", - /* 218 */ "quorum", - /* 219 */ "days", - /* 220 */ "minrows", - /* 221 */ "maxrows", - /* 222 */ "blocks", - /* 223 */ "ctime", - /* 224 */ "wal", - /* 225 */ "fsync", - /* 226 */ "comp", - /* 227 */ "prec", - /* 228 */ "update", - /* 229 */ "cachelast", - /* 230 */ "partitions", - /* 231 */ "typename", - /* 232 */ "signed", - /* 233 */ "create_table_args", - /* 234 */ "create_stable_args", - /* 235 */ "create_table_list", - /* 236 */ "create_from_stable", - /* 237 */ "columnlist", - /* 238 */ "tagitemlist", - /* 239 */ "tagNamelist", - /* 240 */ "select", - /* 241 */ "column", - /* 242 */ "tagitem", - /* 243 */ "selcollist", - /* 244 */ "from", - /* 245 */ "where_opt", - /* 246 */ "interval_opt", - /* 247 */ "session_option", - /* 248 */ "windowstate_option", - /* 249 */ "fill_opt", - /* 250 */ "sliding_opt", - /* 251 */ "groupby_opt", - /* 252 */ "orderby_opt", - /* 253 */ "having_opt", - /* 254 */ "slimit_opt", - /* 255 */ "limit_opt", - /* 256 */ "union", - /* 257 */ "sclp", - /* 258 */ "distinct", - /* 259 */ "expr", - /* 260 */ "as", - /* 261 */ "tablelist", - /* 262 */ "sub", - /* 263 */ "tmvar", - /* 264 */ "sortlist", - /* 265 */ "sortitem", - /* 266 */ "item", - /* 267 */ "sortorder", - /* 268 */ "grouplist", - /* 269 */ "expritem", + /* 47 */ "FUNCTIONS", + /* 48 */ "MNODES", + /* 49 */ "DNODES", + /* 50 */ "ACCOUNTS", + /* 51 */ "USERS", + /* 52 */ "MODULES", + /* 53 */ "QUERIES", + /* 54 */ "CONNECTIONS", + /* 55 */ "STREAMS", + /* 56 */ "VARIABLES", + /* 57 */ "SCORES", + /* 58 */ "GRANTS", + /* 59 */ "VNODES", + /* 60 */ "IPTOKEN", + /* 61 */ "DOT", + /* 62 */ "CREATE", + /* 63 */ "TABLE", + /* 64 */ "STABLE", + /* 65 */ "DATABASE", + /* 66 */ "TABLES", + /* 67 */ "STABLES", + /* 68 */ "VGROUPS", + /* 69 */ "DROP", + /* 70 */ "TOPIC", + /* 71 */ "FUNCTION", + /* 72 */ "DNODE", + /* 73 */ "USER", + /* 74 */ "ACCOUNT", + /* 75 */ "USE", + /* 76 */ "DESCRIBE", + /* 77 */ "ALTER", + /* 78 */ "PASS", + /* 79 */ "PRIVILEGE", + /* 80 */ "LOCAL", + /* 81 */ "COMPACT", + /* 82 */ "LP", + /* 83 */ "RP", + /* 84 */ "IF", + /* 85 */ "EXISTS", + /* 86 */ "AS", + /* 87 */ "OUTPUTTYPE", + /* 88 */ "AGGREGATE", + /* 89 */ "BUFSIZE", + /* 90 */ "PPS", + /* 91 */ "TSERIES", + /* 92 */ "DBS", + /* 93 */ "STORAGE", + /* 94 */ "QTIME", + /* 95 */ "CONNS", + /* 96 */ "STATE", + /* 97 */ "COMMA", + /* 98 */ "KEEP", + /* 99 */ "CACHE", + /* 100 */ "REPLICA", + /* 101 
*/ "QUORUM", + /* 102 */ "DAYS", + /* 103 */ "MINROWS", + /* 104 */ "MAXROWS", + /* 105 */ "BLOCKS", + /* 106 */ "CTIME", + /* 107 */ "WAL", + /* 108 */ "FSYNC", + /* 109 */ "COMP", + /* 110 */ "PRECISION", + /* 111 */ "UPDATE", + /* 112 */ "CACHELAST", + /* 113 */ "PARTITIONS", + /* 114 */ "UNSIGNED", + /* 115 */ "TAGS", + /* 116 */ "USING", + /* 117 */ "NULL", + /* 118 */ "NOW", + /* 119 */ "SELECT", + /* 120 */ "UNION", + /* 121 */ "ALL", + /* 122 */ "DISTINCT", + /* 123 */ "FROM", + /* 124 */ "VARIABLE", + /* 125 */ "INTERVAL", + /* 126 */ "SESSION", + /* 127 */ "STATE_WINDOW", + /* 128 */ "FILL", + /* 129 */ "SLIDING", + /* 130 */ "ORDER", + /* 131 */ "BY", + /* 132 */ "ASC", + /* 133 */ "DESC", + /* 134 */ "GROUP", + /* 135 */ "HAVING", + /* 136 */ "LIMIT", + /* 137 */ "OFFSET", + /* 138 */ "SLIMIT", + /* 139 */ "SOFFSET", + /* 140 */ "WHERE", + /* 141 */ "RESET", + /* 142 */ "QUERY", + /* 143 */ "SYNCDB", + /* 144 */ "ADD", + /* 145 */ "COLUMN", + /* 146 */ "MODIFY", + /* 147 */ "TAG", + /* 148 */ "CHANGE", + /* 149 */ "SET", + /* 150 */ "KILL", + /* 151 */ "CONNECTION", + /* 152 */ "STREAM", + /* 153 */ "COLON", + /* 154 */ "ABORT", + /* 155 */ "AFTER", + /* 156 */ "ATTACH", + /* 157 */ "BEFORE", + /* 158 */ "BEGIN", + /* 159 */ "CASCADE", + /* 160 */ "CLUSTER", + /* 161 */ "CONFLICT", + /* 162 */ "COPY", + /* 163 */ "DEFERRED", + /* 164 */ "DELIMITERS", + /* 165 */ "DETACH", + /* 166 */ "EACH", + /* 167 */ "END", + /* 168 */ "EXPLAIN", + /* 169 */ "FAIL", + /* 170 */ "FOR", + /* 171 */ "IGNORE", + /* 172 */ "IMMEDIATE", + /* 173 */ "INITIALLY", + /* 174 */ "INSTEAD", + /* 175 */ "MATCH", + /* 176 */ "KEY", + /* 177 */ "OF", + /* 178 */ "RAISE", + /* 179 */ "REPLACE", + /* 180 */ "RESTRICT", + /* 181 */ "ROW", + /* 182 */ "STATEMENT", + /* 183 */ "TRIGGER", + /* 184 */ "VIEW", + /* 185 */ "SEMI", + /* 186 */ "NONE", + /* 187 */ "PREV", + /* 188 */ "LINEAR", + /* 189 */ "IMPORT", + /* 190 */ "TBNAME", + /* 191 */ "JOIN", + /* 192 */ "INSERT", + /* 193 */ "INTO", + /* 194 */ "VALUES", + /* 195 */ "error", + /* 196 */ "program", + /* 197 */ "cmd", + /* 198 */ "dbPrefix", + /* 199 */ "ids", + /* 200 */ "cpxName", + /* 201 */ "ifexists", + /* 202 */ "alter_db_optr", + /* 203 */ "alter_topic_optr", + /* 204 */ "acct_optr", + /* 205 */ "exprlist", + /* 206 */ "ifnotexists", + /* 207 */ "db_optr", + /* 208 */ "topic_optr", + /* 209 */ "typename", + /* 210 */ "bufsize", + /* 211 */ "pps", + /* 212 */ "tseries", + /* 213 */ "dbs", + /* 214 */ "streams", + /* 215 */ "storage", + /* 216 */ "qtime", + /* 217 */ "users", + /* 218 */ "conns", + /* 219 */ "state", + /* 220 */ "intitemlist", + /* 221 */ "intitem", + /* 222 */ "keep", + /* 223 */ "cache", + /* 224 */ "replica", + /* 225 */ "quorum", + /* 226 */ "days", + /* 227 */ "minrows", + /* 228 */ "maxrows", + /* 229 */ "blocks", + /* 230 */ "ctime", + /* 231 */ "wal", + /* 232 */ "fsync", + /* 233 */ "comp", + /* 234 */ "prec", + /* 235 */ "update", + /* 236 */ "cachelast", + /* 237 */ "partitions", + /* 238 */ "signed", + /* 239 */ "create_table_args", + /* 240 */ "create_stable_args", + /* 241 */ "create_table_list", + /* 242 */ "create_from_stable", + /* 243 */ "columnlist", + /* 244 */ "tagitemlist", + /* 245 */ "tagNamelist", + /* 246 */ "select", + /* 247 */ "column", + /* 248 */ "tagitem", + /* 249 */ "selcollist", + /* 250 */ "from", + /* 251 */ "where_opt", + /* 252 */ "interval_opt", + /* 253 */ "session_option", + /* 254 */ "windowstate_option", + /* 255 */ "fill_opt", + /* 256 */ "sliding_opt", + /* 257 */ "groupby_opt", + /* 258 
*/ "having_opt", + /* 259 */ "orderby_opt", + /* 260 */ "slimit_opt", + /* 261 */ "limit_opt", + /* 262 */ "union", + /* 263 */ "sclp", + /* 264 */ "distinct", + /* 265 */ "expr", + /* 266 */ "as", + /* 267 */ "tablelist", + /* 268 */ "sub", + /* 269 */ "tmvar", + /* 270 */ "sortlist", + /* 271 */ "sortitem", + /* 272 */ "item", + /* 273 */ "sortorder", + /* 274 */ "grouplist", + /* 275 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1052,287 +1072,292 @@ static const char *const yyRuleName[] = { /* 0 */ "program ::= cmd", /* 1 */ "cmd ::= SHOW DATABASES", /* 2 */ "cmd ::= SHOW TOPICS", - /* 3 */ "cmd ::= SHOW MNODES", - /* 4 */ "cmd ::= SHOW DNODES", - /* 5 */ "cmd ::= SHOW ACCOUNTS", - /* 6 */ "cmd ::= SHOW USERS", - /* 7 */ "cmd ::= SHOW MODULES", - /* 8 */ "cmd ::= SHOW QUERIES", - /* 9 */ "cmd ::= SHOW CONNECTIONS", - /* 10 */ "cmd ::= SHOW STREAMS", - /* 11 */ "cmd ::= SHOW VARIABLES", - /* 12 */ "cmd ::= SHOW SCORES", - /* 13 */ "cmd ::= SHOW GRANTS", - /* 14 */ "cmd ::= SHOW VNODES", - /* 15 */ "cmd ::= SHOW VNODES IPTOKEN", - /* 16 */ "dbPrefix ::=", - /* 17 */ "dbPrefix ::= ids DOT", - /* 18 */ "cpxName ::=", - /* 19 */ "cpxName ::= DOT ids", - /* 20 */ "cmd ::= SHOW CREATE TABLE ids cpxName", - /* 21 */ "cmd ::= SHOW CREATE STABLE ids cpxName", - /* 22 */ "cmd ::= SHOW CREATE DATABASE ids", - /* 23 */ "cmd ::= SHOW dbPrefix TABLES", - /* 24 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", - /* 25 */ "cmd ::= SHOW dbPrefix STABLES", - /* 26 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", - /* 27 */ "cmd ::= SHOW dbPrefix VGROUPS", - /* 28 */ "cmd ::= SHOW dbPrefix VGROUPS ids", - /* 29 */ "cmd ::= DROP TABLE ifexists ids cpxName", - /* 30 */ "cmd ::= DROP STABLE ifexists ids cpxName", - /* 31 */ "cmd ::= DROP DATABASE ifexists ids", - /* 32 */ "cmd ::= DROP TOPIC ifexists ids", - /* 33 */ "cmd ::= DROP DNODE ids", - /* 34 */ "cmd ::= DROP USER ids", - /* 35 */ "cmd ::= DROP ACCOUNT ids", - /* 36 */ "cmd ::= USE ids", - /* 37 */ "cmd ::= DESCRIBE ids cpxName", - /* 38 */ "cmd ::= ALTER USER ids PASS ids", - /* 39 */ "cmd ::= ALTER USER ids PRIVILEGE ids", - /* 40 */ "cmd ::= ALTER DNODE ids ids", - /* 41 */ "cmd ::= ALTER DNODE ids ids ids", - /* 42 */ "cmd ::= ALTER LOCAL ids", - /* 43 */ "cmd ::= ALTER LOCAL ids ids", - /* 44 */ "cmd ::= ALTER DATABASE ids alter_db_optr", - /* 45 */ "cmd ::= ALTER TOPIC ids alter_topic_optr", - /* 46 */ "cmd ::= ALTER ACCOUNT ids acct_optr", - /* 47 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", - /* 48 */ "cmd ::= COMPACT VNODES IN LP exprlist RP", - /* 49 */ "ids ::= ID", - /* 50 */ "ids ::= STRING", - /* 51 */ "ifexists ::= IF EXISTS", - /* 52 */ "ifexists ::=", - /* 53 */ "ifnotexists ::= IF NOT EXISTS", - /* 54 */ "ifnotexists ::=", - /* 55 */ "cmd ::= CREATE DNODE ids", - /* 56 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", - /* 57 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", - /* 58 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr", - /* 59 */ "cmd ::= CREATE USER ids PASS ids", - /* 60 */ "pps ::=", - /* 61 */ "pps ::= PPS INTEGER", - /* 62 */ "tseries ::=", - /* 63 */ "tseries ::= TSERIES INTEGER", - /* 64 */ "dbs ::=", - /* 65 */ "dbs ::= DBS INTEGER", - /* 66 */ "streams ::=", - /* 67 */ "streams ::= STREAMS INTEGER", - /* 68 */ "storage ::=", - /* 69 */ "storage ::= STORAGE INTEGER", - /* 70 */ "qtime ::=", - /* 71 */ "qtime ::= QTIME INTEGER", - /* 72 */ "users ::=", - /* 73 */ "users ::= USERS INTEGER", - /* 74 */ "conns ::=", - /* 75 */ "conns ::= CONNS INTEGER", - /* 76 */ "state ::=", - /* 
77 */ "state ::= STATE ids", - /* 78 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", - /* 79 */ "intitemlist ::= intitemlist COMMA intitem", - /* 80 */ "intitemlist ::= intitem", - /* 81 */ "intitem ::= INTEGER", - /* 82 */ "keep ::= KEEP intitemlist", - /* 83 */ "cache ::= CACHE INTEGER", - /* 84 */ "replica ::= REPLICA INTEGER", - /* 85 */ "quorum ::= QUORUM INTEGER", - /* 86 */ "days ::= DAYS INTEGER", - /* 87 */ "minrows ::= MINROWS INTEGER", - /* 88 */ "maxrows ::= MAXROWS INTEGER", - /* 89 */ "blocks ::= BLOCKS INTEGER", - /* 90 */ "ctime ::= CTIME INTEGER", - /* 91 */ "wal ::= WAL INTEGER", - /* 92 */ "fsync ::= FSYNC INTEGER", - /* 93 */ "comp ::= COMP INTEGER", - /* 94 */ "prec ::= PRECISION STRING", - /* 95 */ "update ::= UPDATE INTEGER", - /* 96 */ "cachelast ::= CACHELAST INTEGER", - /* 97 */ "partitions ::= PARTITIONS INTEGER", - /* 98 */ "db_optr ::=", - /* 99 */ "db_optr ::= db_optr cache", - /* 100 */ "db_optr ::= db_optr replica", - /* 101 */ "db_optr ::= db_optr quorum", - /* 102 */ "db_optr ::= db_optr days", - /* 103 */ "db_optr ::= db_optr minrows", - /* 104 */ "db_optr ::= db_optr maxrows", - /* 105 */ "db_optr ::= db_optr blocks", - /* 106 */ "db_optr ::= db_optr ctime", - /* 107 */ "db_optr ::= db_optr wal", - /* 108 */ "db_optr ::= db_optr fsync", - /* 109 */ "db_optr ::= db_optr comp", - /* 110 */ "db_optr ::= db_optr prec", - /* 111 */ "db_optr ::= db_optr keep", - /* 112 */ "db_optr ::= db_optr update", - /* 113 */ "db_optr ::= db_optr cachelast", - /* 114 */ "topic_optr ::= db_optr", - /* 115 */ "topic_optr ::= topic_optr partitions", - /* 116 */ "alter_db_optr ::=", - /* 117 */ "alter_db_optr ::= alter_db_optr replica", - /* 118 */ "alter_db_optr ::= alter_db_optr quorum", - /* 119 */ "alter_db_optr ::= alter_db_optr keep", - /* 120 */ "alter_db_optr ::= alter_db_optr blocks", - /* 121 */ "alter_db_optr ::= alter_db_optr comp", - /* 122 */ "alter_db_optr ::= alter_db_optr wal", - /* 123 */ "alter_db_optr ::= alter_db_optr fsync", - /* 124 */ "alter_db_optr ::= alter_db_optr update", - /* 125 */ "alter_db_optr ::= alter_db_optr cachelast", - /* 126 */ "alter_topic_optr ::= alter_db_optr", - /* 127 */ "alter_topic_optr ::= alter_topic_optr partitions", - /* 128 */ "typename ::= ids", - /* 129 */ "typename ::= ids LP signed RP", - /* 130 */ "typename ::= ids UNSIGNED", - /* 131 */ "signed ::= INTEGER", - /* 132 */ "signed ::= PLUS INTEGER", - /* 133 */ "signed ::= MINUS INTEGER", - /* 134 */ "cmd ::= CREATE TABLE create_table_args", - /* 135 */ "cmd ::= CREATE TABLE create_stable_args", - /* 136 */ "cmd ::= CREATE STABLE create_stable_args", - /* 137 */ "cmd ::= CREATE TABLE create_table_list", - /* 138 */ "create_table_list ::= create_from_stable", - /* 139 */ "create_table_list ::= create_table_list create_from_stable", - /* 140 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", - /* 141 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", - /* 142 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", - /* 143 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", - /* 144 */ "tagNamelist ::= tagNamelist COMMA ids", - /* 145 */ "tagNamelist ::= ids", - /* 146 */ "create_table_args ::= ifnotexists ids cpxName AS select", - /* 147 */ "columnlist ::= columnlist COMMA column", - /* 148 */ "columnlist ::= column", - /* 149 */ "column ::= ids typename", - /* 150 */ "tagitemlist ::= 
tagitemlist COMMA tagitem", - /* 151 */ "tagitemlist ::= tagitem", - /* 152 */ "tagitem ::= INTEGER", - /* 153 */ "tagitem ::= FLOAT", - /* 154 */ "tagitem ::= STRING", - /* 155 */ "tagitem ::= BOOL", - /* 156 */ "tagitem ::= NULL", - /* 157 */ "tagitem ::= MINUS INTEGER", - /* 158 */ "tagitem ::= MINUS FLOAT", - /* 159 */ "tagitem ::= PLUS INTEGER", - /* 160 */ "tagitem ::= PLUS FLOAT", - /* 161 */ "select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 162 */ "select ::= LP select RP", - /* 163 */ "union ::= select", - /* 164 */ "union ::= union UNION ALL select", - /* 165 */ "cmd ::= union", - /* 166 */ "select ::= SELECT selcollist", - /* 167 */ "sclp ::= selcollist COMMA", - /* 168 */ "sclp ::=", - /* 169 */ "selcollist ::= sclp distinct expr as", - /* 170 */ "selcollist ::= sclp STAR", - /* 171 */ "as ::= AS ids", - /* 172 */ "as ::= ids", - /* 173 */ "as ::=", - /* 174 */ "distinct ::= DISTINCT", - /* 175 */ "distinct ::=", - /* 176 */ "from ::= FROM tablelist", - /* 177 */ "from ::= FROM sub", - /* 178 */ "sub ::= LP union RP", - /* 179 */ "sub ::= LP union RP ids", - /* 180 */ "sub ::= sub COMMA LP union RP ids", - /* 181 */ "tablelist ::= ids cpxName", - /* 182 */ "tablelist ::= ids cpxName ids", - /* 183 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 184 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 185 */ "tmvar ::= VARIABLE", - /* 186 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 187 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", - /* 188 */ "interval_opt ::=", - /* 189 */ "session_option ::=", - /* 190 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", - /* 191 */ "windowstate_option ::=", - /* 192 */ "windowstate_option ::= STATE_WINDOW LP ids RP", - /* 193 */ "fill_opt ::=", - /* 194 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 195 */ "fill_opt ::= FILL LP ID RP", - /* 196 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 197 */ "sliding_opt ::=", - /* 198 */ "orderby_opt ::=", - /* 199 */ "orderby_opt ::= ORDER BY sortlist", - /* 200 */ "sortlist ::= sortlist COMMA item sortorder", - /* 201 */ "sortlist ::= item sortorder", - /* 202 */ "item ::= ids cpxName", - /* 203 */ "sortorder ::= ASC", - /* 204 */ "sortorder ::= DESC", - /* 205 */ "sortorder ::=", - /* 206 */ "groupby_opt ::=", - /* 207 */ "groupby_opt ::= GROUP BY grouplist", - /* 208 */ "grouplist ::= grouplist COMMA item", - /* 209 */ "grouplist ::= item", - /* 210 */ "having_opt ::=", - /* 211 */ "having_opt ::= HAVING expr", - /* 212 */ "limit_opt ::=", - /* 213 */ "limit_opt ::= LIMIT signed", - /* 214 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 215 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 216 */ "slimit_opt ::=", - /* 217 */ "slimit_opt ::= SLIMIT signed", - /* 218 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 219 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 220 */ "where_opt ::=", - /* 221 */ "where_opt ::= WHERE expr", - /* 222 */ "expr ::= LP expr RP", - /* 223 */ "expr ::= ID", - /* 224 */ "expr ::= ID DOT ID", - /* 225 */ "expr ::= ID DOT STAR", - /* 226 */ "expr ::= INTEGER", - /* 227 */ "expr ::= MINUS INTEGER", - /* 228 */ "expr ::= PLUS INTEGER", - /* 229 */ "expr ::= FLOAT", - /* 230 */ "expr ::= MINUS FLOAT", - /* 231 */ "expr ::= PLUS FLOAT", - /* 232 */ "expr ::= STRING", - /* 233 */ "expr ::= NOW", - /* 234 */ "expr ::= VARIABLE", - /* 235 */ "expr ::= PLUS VARIABLE", - /* 236 */ "expr ::= MINUS 
VARIABLE", - /* 237 */ "expr ::= BOOL", - /* 238 */ "expr ::= NULL", - /* 239 */ "expr ::= ID LP exprlist RP", - /* 240 */ "expr ::= ID LP STAR RP", - /* 241 */ "expr ::= expr IS NULL", - /* 242 */ "expr ::= expr IS NOT NULL", - /* 243 */ "expr ::= expr LT expr", - /* 244 */ "expr ::= expr GT expr", - /* 245 */ "expr ::= expr LE expr", - /* 246 */ "expr ::= expr GE expr", - /* 247 */ "expr ::= expr NE expr", - /* 248 */ "expr ::= expr EQ expr", - /* 249 */ "expr ::= expr BETWEEN expr AND expr", - /* 250 */ "expr ::= expr AND expr", - /* 251 */ "expr ::= expr OR expr", - /* 252 */ "expr ::= expr PLUS expr", - /* 253 */ "expr ::= expr MINUS expr", - /* 254 */ "expr ::= expr STAR expr", - /* 255 */ "expr ::= expr SLASH expr", - /* 256 */ "expr ::= expr REM expr", - /* 257 */ "expr ::= expr LIKE expr", - /* 258 */ "expr ::= expr IN LP exprlist RP", - /* 259 */ "exprlist ::= exprlist COMMA expritem", - /* 260 */ "exprlist ::= expritem", - /* 261 */ "expritem ::= expr", - /* 262 */ "expritem ::=", - /* 263 */ "cmd ::= RESET QUERY CACHE", - /* 264 */ "cmd ::= SYNCDB ids REPLICA", - /* 265 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 266 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 267 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", - /* 268 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 269 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 270 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 271 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 272 */ "cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist", - /* 273 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 274 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 275 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", - /* 276 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 277 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 278 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 279 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", - /* 280 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", - /* 281 */ "cmd ::= KILL CONNECTION INTEGER", - /* 282 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 283 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 3 */ "cmd ::= SHOW FUNCTIONS", + /* 4 */ "cmd ::= SHOW MNODES", + /* 5 */ "cmd ::= SHOW DNODES", + /* 6 */ "cmd ::= SHOW ACCOUNTS", + /* 7 */ "cmd ::= SHOW USERS", + /* 8 */ "cmd ::= SHOW MODULES", + /* 9 */ "cmd ::= SHOW QUERIES", + /* 10 */ "cmd ::= SHOW CONNECTIONS", + /* 11 */ "cmd ::= SHOW STREAMS", + /* 12 */ "cmd ::= SHOW VARIABLES", + /* 13 */ "cmd ::= SHOW SCORES", + /* 14 */ "cmd ::= SHOW GRANTS", + /* 15 */ "cmd ::= SHOW VNODES", + /* 16 */ "cmd ::= SHOW VNODES IPTOKEN", + /* 17 */ "dbPrefix ::=", + /* 18 */ "dbPrefix ::= ids DOT", + /* 19 */ "cpxName ::=", + /* 20 */ "cpxName ::= DOT ids", + /* 21 */ "cmd ::= SHOW CREATE TABLE ids cpxName", + /* 22 */ "cmd ::= SHOW CREATE STABLE ids cpxName", + /* 23 */ "cmd ::= SHOW CREATE DATABASE ids", + /* 24 */ "cmd ::= SHOW dbPrefix TABLES", + /* 25 */ "cmd ::= SHOW dbPrefix TABLES LIKE ids", + /* 26 */ "cmd ::= SHOW dbPrefix STABLES", + /* 27 */ "cmd ::= SHOW dbPrefix STABLES LIKE ids", + /* 28 */ "cmd ::= SHOW dbPrefix VGROUPS", + /* 29 */ "cmd ::= SHOW dbPrefix VGROUPS ids", + /* 30 */ "cmd ::= DROP TABLE ifexists ids cpxName", + /* 31 */ "cmd ::= DROP STABLE ifexists ids cpxName", + /* 32 */ "cmd ::= DROP DATABASE ifexists ids", + /* 33 */ "cmd 
::= DROP TOPIC ifexists ids", + /* 34 */ "cmd ::= DROP FUNCTION ids", + /* 35 */ "cmd ::= DROP DNODE ids", + /* 36 */ "cmd ::= DROP USER ids", + /* 37 */ "cmd ::= DROP ACCOUNT ids", + /* 38 */ "cmd ::= USE ids", + /* 39 */ "cmd ::= DESCRIBE ids cpxName", + /* 40 */ "cmd ::= ALTER USER ids PASS ids", + /* 41 */ "cmd ::= ALTER USER ids PRIVILEGE ids", + /* 42 */ "cmd ::= ALTER DNODE ids ids", + /* 43 */ "cmd ::= ALTER DNODE ids ids ids", + /* 44 */ "cmd ::= ALTER LOCAL ids", + /* 45 */ "cmd ::= ALTER LOCAL ids ids", + /* 46 */ "cmd ::= ALTER DATABASE ids alter_db_optr", + /* 47 */ "cmd ::= ALTER TOPIC ids alter_topic_optr", + /* 48 */ "cmd ::= ALTER ACCOUNT ids acct_optr", + /* 49 */ "cmd ::= ALTER ACCOUNT ids PASS ids acct_optr", + /* 50 */ "cmd ::= COMPACT VNODES IN LP exprlist RP", + /* 51 */ "ids ::= ID", + /* 52 */ "ids ::= STRING", + /* 53 */ "ifexists ::= IF EXISTS", + /* 54 */ "ifexists ::=", + /* 55 */ "ifnotexists ::= IF NOT EXISTS", + /* 56 */ "ifnotexists ::=", + /* 57 */ "cmd ::= CREATE DNODE ids", + /* 58 */ "cmd ::= CREATE ACCOUNT ids PASS ids acct_optr", + /* 59 */ "cmd ::= CREATE DATABASE ifnotexists ids db_optr", + /* 60 */ "cmd ::= CREATE TOPIC ifnotexists ids topic_optr", + /* 61 */ "cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize", + /* 62 */ "cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize", + /* 63 */ "cmd ::= CREATE USER ids PASS ids", + /* 64 */ "bufsize ::=", + /* 65 */ "bufsize ::= BUFSIZE INTEGER", + /* 66 */ "pps ::=", + /* 67 */ "pps ::= PPS INTEGER", + /* 68 */ "tseries ::=", + /* 69 */ "tseries ::= TSERIES INTEGER", + /* 70 */ "dbs ::=", + /* 71 */ "dbs ::= DBS INTEGER", + /* 72 */ "streams ::=", + /* 73 */ "streams ::= STREAMS INTEGER", + /* 74 */ "storage ::=", + /* 75 */ "storage ::= STORAGE INTEGER", + /* 76 */ "qtime ::=", + /* 77 */ "qtime ::= QTIME INTEGER", + /* 78 */ "users ::=", + /* 79 */ "users ::= USERS INTEGER", + /* 80 */ "conns ::=", + /* 81 */ "conns ::= CONNS INTEGER", + /* 82 */ "state ::=", + /* 83 */ "state ::= STATE ids", + /* 84 */ "acct_optr ::= pps tseries storage streams qtime dbs users conns state", + /* 85 */ "intitemlist ::= intitemlist COMMA intitem", + /* 86 */ "intitemlist ::= intitem", + /* 87 */ "intitem ::= INTEGER", + /* 88 */ "keep ::= KEEP intitemlist", + /* 89 */ "cache ::= CACHE INTEGER", + /* 90 */ "replica ::= REPLICA INTEGER", + /* 91 */ "quorum ::= QUORUM INTEGER", + /* 92 */ "days ::= DAYS INTEGER", + /* 93 */ "minrows ::= MINROWS INTEGER", + /* 94 */ "maxrows ::= MAXROWS INTEGER", + /* 95 */ "blocks ::= BLOCKS INTEGER", + /* 96 */ "ctime ::= CTIME INTEGER", + /* 97 */ "wal ::= WAL INTEGER", + /* 98 */ "fsync ::= FSYNC INTEGER", + /* 99 */ "comp ::= COMP INTEGER", + /* 100 */ "prec ::= PRECISION STRING", + /* 101 */ "update ::= UPDATE INTEGER", + /* 102 */ "cachelast ::= CACHELAST INTEGER", + /* 103 */ "partitions ::= PARTITIONS INTEGER", + /* 104 */ "db_optr ::=", + /* 105 */ "db_optr ::= db_optr cache", + /* 106 */ "db_optr ::= db_optr replica", + /* 107 */ "db_optr ::= db_optr quorum", + /* 108 */ "db_optr ::= db_optr days", + /* 109 */ "db_optr ::= db_optr minrows", + /* 110 */ "db_optr ::= db_optr maxrows", + /* 111 */ "db_optr ::= db_optr blocks", + /* 112 */ "db_optr ::= db_optr ctime", + /* 113 */ "db_optr ::= db_optr wal", + /* 114 */ "db_optr ::= db_optr fsync", + /* 115 */ "db_optr ::= db_optr comp", + /* 116 */ "db_optr ::= db_optr prec", + /* 117 */ "db_optr ::= db_optr keep", + /* 118 */ "db_optr ::= db_optr update", + /* 119 */ "db_optr ::= db_optr cachelast", + 
/* 120 */ "topic_optr ::= db_optr", + /* 121 */ "topic_optr ::= topic_optr partitions", + /* 122 */ "alter_db_optr ::=", + /* 123 */ "alter_db_optr ::= alter_db_optr replica", + /* 124 */ "alter_db_optr ::= alter_db_optr quorum", + /* 125 */ "alter_db_optr ::= alter_db_optr keep", + /* 126 */ "alter_db_optr ::= alter_db_optr blocks", + /* 127 */ "alter_db_optr ::= alter_db_optr comp", + /* 128 */ "alter_db_optr ::= alter_db_optr update", + /* 129 */ "alter_db_optr ::= alter_db_optr cachelast", + /* 130 */ "alter_topic_optr ::= alter_db_optr", + /* 131 */ "alter_topic_optr ::= alter_topic_optr partitions", + /* 132 */ "typename ::= ids", + /* 133 */ "typename ::= ids LP signed RP", + /* 134 */ "typename ::= ids UNSIGNED", + /* 135 */ "signed ::= INTEGER", + /* 136 */ "signed ::= PLUS INTEGER", + /* 137 */ "signed ::= MINUS INTEGER", + /* 138 */ "cmd ::= CREATE TABLE create_table_args", + /* 139 */ "cmd ::= CREATE TABLE create_stable_args", + /* 140 */ "cmd ::= CREATE STABLE create_stable_args", + /* 141 */ "cmd ::= CREATE TABLE create_table_list", + /* 142 */ "create_table_list ::= create_from_stable", + /* 143 */ "create_table_list ::= create_table_list create_from_stable", + /* 144 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", + /* 145 */ "create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", + /* 146 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", + /* 147 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP", + /* 148 */ "tagNamelist ::= tagNamelist COMMA ids", + /* 149 */ "tagNamelist ::= ids", + /* 150 */ "create_table_args ::= ifnotexists ids cpxName AS select", + /* 151 */ "columnlist ::= columnlist COMMA column", + /* 152 */ "columnlist ::= column", + /* 153 */ "column ::= ids typename", + /* 154 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 155 */ "tagitemlist ::= tagitem", + /* 156 */ "tagitem ::= INTEGER", + /* 157 */ "tagitem ::= FLOAT", + /* 158 */ "tagitem ::= STRING", + /* 159 */ "tagitem ::= BOOL", + /* 160 */ "tagitem ::= NULL", + /* 161 */ "tagitem ::= NOW", + /* 162 */ "tagitem ::= MINUS INTEGER", + /* 163 */ "tagitem ::= MINUS FLOAT", + /* 164 */ "tagitem ::= PLUS INTEGER", + /* 165 */ "tagitem ::= PLUS FLOAT", + /* 166 */ "select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt", + /* 167 */ "select ::= LP select RP", + /* 168 */ "union ::= select", + /* 169 */ "union ::= union UNION ALL select", + /* 170 */ "cmd ::= union", + /* 171 */ "select ::= SELECT selcollist", + /* 172 */ "sclp ::= selcollist COMMA", + /* 173 */ "sclp ::=", + /* 174 */ "selcollist ::= sclp distinct expr as", + /* 175 */ "selcollist ::= sclp STAR", + /* 176 */ "as ::= AS ids", + /* 177 */ "as ::= ids", + /* 178 */ "as ::=", + /* 179 */ "distinct ::= DISTINCT", + /* 180 */ "distinct ::=", + /* 181 */ "from ::= FROM tablelist", + /* 182 */ "from ::= FROM sub", + /* 183 */ "sub ::= LP union RP", + /* 184 */ "sub ::= LP union RP ids", + /* 185 */ "sub ::= sub COMMA LP union RP ids", + /* 186 */ "tablelist ::= ids cpxName", + /* 187 */ "tablelist ::= ids cpxName ids", + /* 188 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 189 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 190 */ "tmvar ::= VARIABLE", + /* 191 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 192 */ "interval_opt ::= INTERVAL LP tmvar COMMA 
tmvar RP", + /* 193 */ "interval_opt ::=", + /* 194 */ "session_option ::=", + /* 195 */ "session_option ::= SESSION LP ids cpxName COMMA tmvar RP", + /* 196 */ "windowstate_option ::=", + /* 197 */ "windowstate_option ::= STATE_WINDOW LP ids RP", + /* 198 */ "fill_opt ::=", + /* 199 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 200 */ "fill_opt ::= FILL LP ID RP", + /* 201 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 202 */ "sliding_opt ::=", + /* 203 */ "orderby_opt ::=", + /* 204 */ "orderby_opt ::= ORDER BY sortlist", + /* 205 */ "sortlist ::= sortlist COMMA item sortorder", + /* 206 */ "sortlist ::= item sortorder", + /* 207 */ "item ::= ids cpxName", + /* 208 */ "sortorder ::= ASC", + /* 209 */ "sortorder ::= DESC", + /* 210 */ "sortorder ::=", + /* 211 */ "groupby_opt ::=", + /* 212 */ "groupby_opt ::= GROUP BY grouplist", + /* 213 */ "grouplist ::= grouplist COMMA item", + /* 214 */ "grouplist ::= item", + /* 215 */ "having_opt ::=", + /* 216 */ "having_opt ::= HAVING expr", + /* 217 */ "limit_opt ::=", + /* 218 */ "limit_opt ::= LIMIT signed", + /* 219 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 220 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 221 */ "slimit_opt ::=", + /* 222 */ "slimit_opt ::= SLIMIT signed", + /* 223 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 224 */ "slimit_opt ::= SLIMIT signed COMMA signed", + /* 225 */ "where_opt ::=", + /* 226 */ "where_opt ::= WHERE expr", + /* 227 */ "expr ::= LP expr RP", + /* 228 */ "expr ::= ID", + /* 229 */ "expr ::= ID DOT ID", + /* 230 */ "expr ::= ID DOT STAR", + /* 231 */ "expr ::= INTEGER", + /* 232 */ "expr ::= MINUS INTEGER", + /* 233 */ "expr ::= PLUS INTEGER", + /* 234 */ "expr ::= FLOAT", + /* 235 */ "expr ::= MINUS FLOAT", + /* 236 */ "expr ::= PLUS FLOAT", + /* 237 */ "expr ::= STRING", + /* 238 */ "expr ::= NOW", + /* 239 */ "expr ::= VARIABLE", + /* 240 */ "expr ::= PLUS VARIABLE", + /* 241 */ "expr ::= MINUS VARIABLE", + /* 242 */ "expr ::= BOOL", + /* 243 */ "expr ::= NULL", + /* 244 */ "expr ::= ID LP exprlist RP", + /* 245 */ "expr ::= ID LP STAR RP", + /* 246 */ "expr ::= expr IS NULL", + /* 247 */ "expr ::= expr IS NOT NULL", + /* 248 */ "expr ::= expr LT expr", + /* 249 */ "expr ::= expr GT expr", + /* 250 */ "expr ::= expr LE expr", + /* 251 */ "expr ::= expr GE expr", + /* 252 */ "expr ::= expr NE expr", + /* 253 */ "expr ::= expr EQ expr", + /* 254 */ "expr ::= expr BETWEEN expr AND expr", + /* 255 */ "expr ::= expr AND expr", + /* 256 */ "expr ::= expr OR expr", + /* 257 */ "expr ::= expr PLUS expr", + /* 258 */ "expr ::= expr MINUS expr", + /* 259 */ "expr ::= expr STAR expr", + /* 260 */ "expr ::= expr SLASH expr", + /* 261 */ "expr ::= expr REM expr", + /* 262 */ "expr ::= expr LIKE expr", + /* 263 */ "expr ::= expr IN LP exprlist RP", + /* 264 */ "exprlist ::= exprlist COMMA expritem", + /* 265 */ "exprlist ::= expritem", + /* 266 */ "expritem ::= expr", + /* 267 */ "expritem ::=", + /* 268 */ "cmd ::= RESET QUERY CACHE", + /* 269 */ "cmd ::= SYNCDB ids REPLICA", + /* 270 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 271 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 272 */ "cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist", + /* 273 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 274 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 275 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 276 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 277 */ "cmd ::= ALTER TABLE ids cpxName 
MODIFY TAG columnlist", + /* 278 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 279 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 280 */ "cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist", + /* 281 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 282 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 283 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", + /* 284 */ "cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem", + /* 285 */ "cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist", + /* 286 */ "cmd ::= KILL CONNECTION INTEGER", + /* 287 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 288 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1453,58 +1478,58 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 200: /* exprlist */ - case 243: /* selcollist */ - case 257: /* sclp */ + case 205: /* exprlist */ + case 249: /* selcollist */ + case 263: /* sclp */ { -tSqlExprListDestroy((yypminor->yy441)); +tSqlExprListDestroy((yypminor->yy525)); } break; - case 213: /* intitemlist */ - case 215: /* keep */ - case 237: /* columnlist */ - case 238: /* tagitemlist */ - case 239: /* tagNamelist */ - case 249: /* fill_opt */ - case 251: /* groupby_opt */ - case 252: /* orderby_opt */ - case 264: /* sortlist */ - case 268: /* grouplist */ -{ -taosArrayDestroy((yypminor->yy441)); + case 220: /* intitemlist */ + case 222: /* keep */ + case 243: /* columnlist */ + case 244: /* tagitemlist */ + case 245: /* tagNamelist */ + case 255: /* fill_opt */ + case 257: /* groupby_opt */ + case 259: /* orderby_opt */ + case 270: /* sortlist */ + case 274: /* grouplist */ +{ +taosArrayDestroy((yypminor->yy525)); } break; - case 235: /* create_table_list */ + case 241: /* create_table_list */ { -destroyCreateTableSql((yypminor->yy182)); +destroyCreateTableSql((yypminor->yy158)); } break; - case 240: /* select */ + case 246: /* select */ { -destroySqlNode((yypminor->yy236)); +destroySqlNode((yypminor->yy464)); } break; - case 244: /* from */ - case 261: /* tablelist */ - case 262: /* sub */ + case 250: /* from */ + case 267: /* tablelist */ + case 268: /* sub */ { -destroyRelationInfo((yypminor->yy244)); +destroyRelationInfo((yypminor->yy412)); } break; - case 245: /* where_opt */ - case 253: /* having_opt */ - case 259: /* expr */ - case 269: /* expritem */ + case 251: /* where_opt */ + case 258: /* having_opt */ + case 265: /* expr */ + case 275: /* expritem */ { -tSqlExprDestroy((yypminor->yy166)); +tSqlExprDestroy((yypminor->yy370)); } break; - case 256: /* union */ + case 262: /* union */ { -destroyAllSqlNode((yypminor->yy441)); +destroyAllSqlNode((yypminor->yy525)); } break; - case 265: /* sortitem */ + case 271: /* sortitem */ { tVariantDestroy(&(yypminor->yy506)); } @@ -1800,290 +1825,295 @@ static const struct { YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ signed char nrhs; /* Negative of the number of RHS symbols in the rule */ } yyRuleInfo[] = { - { 191, -1 }, /* (0) program ::= cmd */ - { 192, -2 }, /* (1) cmd ::= SHOW DATABASES */ - { 192, -2 }, /* (2) cmd ::= SHOW TOPICS */ - { 192, -2 }, /* (3) cmd ::= SHOW MNODES */ - { 192, -2 }, /* (4) cmd ::= SHOW DNODES */ - { 192, -2 }, /* (5) cmd ::= SHOW ACCOUNTS */ - { 192, -2 }, /* (6) cmd ::= SHOW USERS */ - { 192, -2 }, /* (7) cmd ::= SHOW MODULES */ - { 192, -2 }, /* (8) cmd ::= SHOW QUERIES */ - { 192, -2 }, /* (9) cmd ::= SHOW CONNECTIONS */ - { 
192, -2 }, /* (10) cmd ::= SHOW STREAMS */ - { 192, -2 }, /* (11) cmd ::= SHOW VARIABLES */ - { 192, -2 }, /* (12) cmd ::= SHOW SCORES */ - { 192, -2 }, /* (13) cmd ::= SHOW GRANTS */ - { 192, -2 }, /* (14) cmd ::= SHOW VNODES */ - { 192, -3 }, /* (15) cmd ::= SHOW VNODES IPTOKEN */ - { 193, 0 }, /* (16) dbPrefix ::= */ - { 193, -2 }, /* (17) dbPrefix ::= ids DOT */ - { 195, 0 }, /* (18) cpxName ::= */ - { 195, -2 }, /* (19) cpxName ::= DOT ids */ - { 192, -5 }, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ - { 192, -5 }, /* (21) cmd ::= SHOW CREATE STABLE ids cpxName */ - { 192, -4 }, /* (22) cmd ::= SHOW CREATE DATABASE ids */ - { 192, -3 }, /* (23) cmd ::= SHOW dbPrefix TABLES */ - { 192, -5 }, /* (24) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 192, -3 }, /* (25) cmd ::= SHOW dbPrefix STABLES */ - { 192, -5 }, /* (26) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 192, -3 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS */ - { 192, -4 }, /* (28) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 192, -5 }, /* (29) cmd ::= DROP TABLE ifexists ids cpxName */ - { 192, -5 }, /* (30) cmd ::= DROP STABLE ifexists ids cpxName */ - { 192, -4 }, /* (31) cmd ::= DROP DATABASE ifexists ids */ - { 192, -4 }, /* (32) cmd ::= DROP TOPIC ifexists ids */ - { 192, -3 }, /* (33) cmd ::= DROP DNODE ids */ - { 192, -3 }, /* (34) cmd ::= DROP USER ids */ - { 192, -3 }, /* (35) cmd ::= DROP ACCOUNT ids */ - { 192, -2 }, /* (36) cmd ::= USE ids */ - { 192, -3 }, /* (37) cmd ::= DESCRIBE ids cpxName */ - { 192, -5 }, /* (38) cmd ::= ALTER USER ids PASS ids */ - { 192, -5 }, /* (39) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 192, -4 }, /* (40) cmd ::= ALTER DNODE ids ids */ - { 192, -5 }, /* (41) cmd ::= ALTER DNODE ids ids ids */ - { 192, -3 }, /* (42) cmd ::= ALTER LOCAL ids */ - { 192, -4 }, /* (43) cmd ::= ALTER LOCAL ids ids */ - { 192, -4 }, /* (44) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 192, -4 }, /* (45) cmd ::= ALTER TOPIC ids alter_topic_optr */ - { 192, -4 }, /* (46) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 192, -6 }, /* (47) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 192, -6 }, /* (48) cmd ::= COMPACT VNODES IN LP exprlist RP */ - { 194, -1 }, /* (49) ids ::= ID */ - { 194, -1 }, /* (50) ids ::= STRING */ - { 196, -2 }, /* (51) ifexists ::= IF EXISTS */ - { 196, 0 }, /* (52) ifexists ::= */ - { 201, -3 }, /* (53) ifnotexists ::= IF NOT EXISTS */ - { 201, 0 }, /* (54) ifnotexists ::= */ - { 192, -3 }, /* (55) cmd ::= CREATE DNODE ids */ - { 192, -6 }, /* (56) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 192, -5 }, /* (57) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 192, -5 }, /* (58) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - { 192, -5 }, /* (59) cmd ::= CREATE USER ids PASS ids */ - { 204, 0 }, /* (60) pps ::= */ - { 204, -2 }, /* (61) pps ::= PPS INTEGER */ - { 205, 0 }, /* (62) tseries ::= */ - { 205, -2 }, /* (63) tseries ::= TSERIES INTEGER */ - { 206, 0 }, /* (64) dbs ::= */ - { 206, -2 }, /* (65) dbs ::= DBS INTEGER */ - { 207, 0 }, /* (66) streams ::= */ - { 207, -2 }, /* (67) streams ::= STREAMS INTEGER */ - { 208, 0 }, /* (68) storage ::= */ - { 208, -2 }, /* (69) storage ::= STORAGE INTEGER */ - { 209, 0 }, /* (70) qtime ::= */ - { 209, -2 }, /* (71) qtime ::= QTIME INTEGER */ - { 210, 0 }, /* (72) users ::= */ - { 210, -2 }, /* (73) users ::= USERS INTEGER */ - { 211, 0 }, /* (74) conns ::= */ - { 211, -2 }, /* (75) conns ::= CONNS INTEGER */ - { 212, 0 }, /* (76) state ::= */ - { 212, -2 }, /* (77) state ::= STATE ids */ - { 199, -9 }, /* (78) 
acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 213, -3 }, /* (79) intitemlist ::= intitemlist COMMA intitem */ - { 213, -1 }, /* (80) intitemlist ::= intitem */ - { 214, -1 }, /* (81) intitem ::= INTEGER */ - { 215, -2 }, /* (82) keep ::= KEEP intitemlist */ - { 216, -2 }, /* (83) cache ::= CACHE INTEGER */ - { 217, -2 }, /* (84) replica ::= REPLICA INTEGER */ - { 218, -2 }, /* (85) quorum ::= QUORUM INTEGER */ - { 219, -2 }, /* (86) days ::= DAYS INTEGER */ - { 220, -2 }, /* (87) minrows ::= MINROWS INTEGER */ - { 221, -2 }, /* (88) maxrows ::= MAXROWS INTEGER */ - { 222, -2 }, /* (89) blocks ::= BLOCKS INTEGER */ - { 223, -2 }, /* (90) ctime ::= CTIME INTEGER */ - { 224, -2 }, /* (91) wal ::= WAL INTEGER */ - { 225, -2 }, /* (92) fsync ::= FSYNC INTEGER */ - { 226, -2 }, /* (93) comp ::= COMP INTEGER */ - { 227, -2 }, /* (94) prec ::= PRECISION STRING */ - { 228, -2 }, /* (95) update ::= UPDATE INTEGER */ - { 229, -2 }, /* (96) cachelast ::= CACHELAST INTEGER */ - { 230, -2 }, /* (97) partitions ::= PARTITIONS INTEGER */ - { 202, 0 }, /* (98) db_optr ::= */ - { 202, -2 }, /* (99) db_optr ::= db_optr cache */ - { 202, -2 }, /* (100) db_optr ::= db_optr replica */ - { 202, -2 }, /* (101) db_optr ::= db_optr quorum */ - { 202, -2 }, /* (102) db_optr ::= db_optr days */ - { 202, -2 }, /* (103) db_optr ::= db_optr minrows */ - { 202, -2 }, /* (104) db_optr ::= db_optr maxrows */ - { 202, -2 }, /* (105) db_optr ::= db_optr blocks */ - { 202, -2 }, /* (106) db_optr ::= db_optr ctime */ - { 202, -2 }, /* (107) db_optr ::= db_optr wal */ - { 202, -2 }, /* (108) db_optr ::= db_optr fsync */ - { 202, -2 }, /* (109) db_optr ::= db_optr comp */ - { 202, -2 }, /* (110) db_optr ::= db_optr prec */ - { 202, -2 }, /* (111) db_optr ::= db_optr keep */ - { 202, -2 }, /* (112) db_optr ::= db_optr update */ - { 202, -2 }, /* (113) db_optr ::= db_optr cachelast */ - { 203, -1 }, /* (114) topic_optr ::= db_optr */ - { 203, -2 }, /* (115) topic_optr ::= topic_optr partitions */ - { 197, 0 }, /* (116) alter_db_optr ::= */ - { 197, -2 }, /* (117) alter_db_optr ::= alter_db_optr replica */ - { 197, -2 }, /* (118) alter_db_optr ::= alter_db_optr quorum */ - { 197, -2 }, /* (119) alter_db_optr ::= alter_db_optr keep */ - { 197, -2 }, /* (120) alter_db_optr ::= alter_db_optr blocks */ - { 197, -2 }, /* (121) alter_db_optr ::= alter_db_optr comp */ - { 197, -2 }, /* (122) alter_db_optr ::= alter_db_optr wal */ - { 197, -2 }, /* (123) alter_db_optr ::= alter_db_optr fsync */ - { 197, -2 }, /* (124) alter_db_optr ::= alter_db_optr update */ - { 197, -2 }, /* (125) alter_db_optr ::= alter_db_optr cachelast */ - { 198, -1 }, /* (126) alter_topic_optr ::= alter_db_optr */ - { 198, -2 }, /* (127) alter_topic_optr ::= alter_topic_optr partitions */ - { 231, -1 }, /* (128) typename ::= ids */ - { 231, -4 }, /* (129) typename ::= ids LP signed RP */ - { 231, -2 }, /* (130) typename ::= ids UNSIGNED */ - { 232, -1 }, /* (131) signed ::= INTEGER */ - { 232, -2 }, /* (132) signed ::= PLUS INTEGER */ - { 232, -2 }, /* (133) signed ::= MINUS INTEGER */ - { 192, -3 }, /* (134) cmd ::= CREATE TABLE create_table_args */ - { 192, -3 }, /* (135) cmd ::= CREATE TABLE create_stable_args */ - { 192, -3 }, /* (136) cmd ::= CREATE STABLE create_stable_args */ - { 192, -3 }, /* (137) cmd ::= CREATE TABLE create_table_list */ - { 235, -1 }, /* (138) create_table_list ::= create_from_stable */ - { 235, -2 }, /* (139) create_table_list ::= create_table_list create_from_stable */ - { 233, -6 }, /* (140) 
create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - { 234, -10 }, /* (141) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - { 236, -10 }, /* (142) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 236, -13 }, /* (143) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - { 239, -3 }, /* (144) tagNamelist ::= tagNamelist COMMA ids */ - { 239, -1 }, /* (145) tagNamelist ::= ids */ - { 233, -5 }, /* (146) create_table_args ::= ifnotexists ids cpxName AS select */ - { 237, -3 }, /* (147) columnlist ::= columnlist COMMA column */ - { 237, -1 }, /* (148) columnlist ::= column */ - { 241, -2 }, /* (149) column ::= ids typename */ - { 238, -3 }, /* (150) tagitemlist ::= tagitemlist COMMA tagitem */ - { 238, -1 }, /* (151) tagitemlist ::= tagitem */ - { 242, -1 }, /* (152) tagitem ::= INTEGER */ - { 242, -1 }, /* (153) tagitem ::= FLOAT */ - { 242, -1 }, /* (154) tagitem ::= STRING */ - { 242, -1 }, /* (155) tagitem ::= BOOL */ - { 242, -1 }, /* (156) tagitem ::= NULL */ - { 242, -2 }, /* (157) tagitem ::= MINUS INTEGER */ - { 242, -2 }, /* (158) tagitem ::= MINUS FLOAT */ - { 242, -2 }, /* (159) tagitem ::= PLUS INTEGER */ - { 242, -2 }, /* (160) tagitem ::= PLUS FLOAT */ - { 240, -14 }, /* (161) select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 240, -3 }, /* (162) select ::= LP select RP */ - { 256, -1 }, /* (163) union ::= select */ - { 256, -4 }, /* (164) union ::= union UNION ALL select */ - { 192, -1 }, /* (165) cmd ::= union */ - { 240, -2 }, /* (166) select ::= SELECT selcollist */ - { 257, -2 }, /* (167) sclp ::= selcollist COMMA */ - { 257, 0 }, /* (168) sclp ::= */ - { 243, -4 }, /* (169) selcollist ::= sclp distinct expr as */ - { 243, -2 }, /* (170) selcollist ::= sclp STAR */ - { 260, -2 }, /* (171) as ::= AS ids */ - { 260, -1 }, /* (172) as ::= ids */ - { 260, 0 }, /* (173) as ::= */ - { 258, -1 }, /* (174) distinct ::= DISTINCT */ - { 258, 0 }, /* (175) distinct ::= */ - { 244, -2 }, /* (176) from ::= FROM tablelist */ - { 244, -2 }, /* (177) from ::= FROM sub */ - { 262, -3 }, /* (178) sub ::= LP union RP */ - { 262, -4 }, /* (179) sub ::= LP union RP ids */ - { 262, -6 }, /* (180) sub ::= sub COMMA LP union RP ids */ - { 261, -2 }, /* (181) tablelist ::= ids cpxName */ - { 261, -3 }, /* (182) tablelist ::= ids cpxName ids */ - { 261, -4 }, /* (183) tablelist ::= tablelist COMMA ids cpxName */ - { 261, -5 }, /* (184) tablelist ::= tablelist COMMA ids cpxName ids */ - { 263, -1 }, /* (185) tmvar ::= VARIABLE */ - { 246, -4 }, /* (186) interval_opt ::= INTERVAL LP tmvar RP */ - { 246, -6 }, /* (187) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 246, 0 }, /* (188) interval_opt ::= */ - { 247, 0 }, /* (189) session_option ::= */ - { 247, -7 }, /* (190) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - { 248, 0 }, /* (191) windowstate_option ::= */ - { 248, -4 }, /* (192) windowstate_option ::= STATE_WINDOW LP ids RP */ - { 249, 0 }, /* (193) fill_opt ::= */ - { 249, -6 }, /* (194) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 249, -4 }, /* (195) fill_opt ::= FILL LP ID RP */ - { 250, -4 }, /* (196) sliding_opt ::= SLIDING LP tmvar RP */ - { 250, 0 }, /* (197) sliding_opt ::= */ - { 252, 0 }, /* (198) orderby_opt ::= */ - { 252, -3 }, /* (199) orderby_opt ::= ORDER BY sortlist 
*/ - { 264, -4 }, /* (200) sortlist ::= sortlist COMMA item sortorder */ - { 264, -2 }, /* (201) sortlist ::= item sortorder */ - { 266, -2 }, /* (202) item ::= ids cpxName */ - { 267, -1 }, /* (203) sortorder ::= ASC */ - { 267, -1 }, /* (204) sortorder ::= DESC */ - { 267, 0 }, /* (205) sortorder ::= */ - { 251, 0 }, /* (206) groupby_opt ::= */ - { 251, -3 }, /* (207) groupby_opt ::= GROUP BY grouplist */ - { 268, -3 }, /* (208) grouplist ::= grouplist COMMA item */ - { 268, -1 }, /* (209) grouplist ::= item */ - { 253, 0 }, /* (210) having_opt ::= */ - { 253, -2 }, /* (211) having_opt ::= HAVING expr */ - { 255, 0 }, /* (212) limit_opt ::= */ - { 255, -2 }, /* (213) limit_opt ::= LIMIT signed */ - { 255, -4 }, /* (214) limit_opt ::= LIMIT signed OFFSET signed */ - { 255, -4 }, /* (215) limit_opt ::= LIMIT signed COMMA signed */ - { 254, 0 }, /* (216) slimit_opt ::= */ - { 254, -2 }, /* (217) slimit_opt ::= SLIMIT signed */ - { 254, -4 }, /* (218) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 254, -4 }, /* (219) slimit_opt ::= SLIMIT signed COMMA signed */ - { 245, 0 }, /* (220) where_opt ::= */ - { 245, -2 }, /* (221) where_opt ::= WHERE expr */ - { 259, -3 }, /* (222) expr ::= LP expr RP */ - { 259, -1 }, /* (223) expr ::= ID */ - { 259, -3 }, /* (224) expr ::= ID DOT ID */ - { 259, -3 }, /* (225) expr ::= ID DOT STAR */ - { 259, -1 }, /* (226) expr ::= INTEGER */ - { 259, -2 }, /* (227) expr ::= MINUS INTEGER */ - { 259, -2 }, /* (228) expr ::= PLUS INTEGER */ - { 259, -1 }, /* (229) expr ::= FLOAT */ - { 259, -2 }, /* (230) expr ::= MINUS FLOAT */ - { 259, -2 }, /* (231) expr ::= PLUS FLOAT */ - { 259, -1 }, /* (232) expr ::= STRING */ - { 259, -1 }, /* (233) expr ::= NOW */ - { 259, -1 }, /* (234) expr ::= VARIABLE */ - { 259, -2 }, /* (235) expr ::= PLUS VARIABLE */ - { 259, -2 }, /* (236) expr ::= MINUS VARIABLE */ - { 259, -1 }, /* (237) expr ::= BOOL */ - { 259, -1 }, /* (238) expr ::= NULL */ - { 259, -4 }, /* (239) expr ::= ID LP exprlist RP */ - { 259, -4 }, /* (240) expr ::= ID LP STAR RP */ - { 259, -3 }, /* (241) expr ::= expr IS NULL */ - { 259, -4 }, /* (242) expr ::= expr IS NOT NULL */ - { 259, -3 }, /* (243) expr ::= expr LT expr */ - { 259, -3 }, /* (244) expr ::= expr GT expr */ - { 259, -3 }, /* (245) expr ::= expr LE expr */ - { 259, -3 }, /* (246) expr ::= expr GE expr */ - { 259, -3 }, /* (247) expr ::= expr NE expr */ - { 259, -3 }, /* (248) expr ::= expr EQ expr */ - { 259, -5 }, /* (249) expr ::= expr BETWEEN expr AND expr */ - { 259, -3 }, /* (250) expr ::= expr AND expr */ - { 259, -3 }, /* (251) expr ::= expr OR expr */ - { 259, -3 }, /* (252) expr ::= expr PLUS expr */ - { 259, -3 }, /* (253) expr ::= expr MINUS expr */ - { 259, -3 }, /* (254) expr ::= expr STAR expr */ - { 259, -3 }, /* (255) expr ::= expr SLASH expr */ - { 259, -3 }, /* (256) expr ::= expr REM expr */ - { 259, -3 }, /* (257) expr ::= expr LIKE expr */ - { 259, -5 }, /* (258) expr ::= expr IN LP exprlist RP */ - { 200, -3 }, /* (259) exprlist ::= exprlist COMMA expritem */ - { 200, -1 }, /* (260) exprlist ::= expritem */ - { 269, -1 }, /* (261) expritem ::= expr */ - { 269, 0 }, /* (262) expritem ::= */ - { 192, -3 }, /* (263) cmd ::= RESET QUERY CACHE */ - { 192, -3 }, /* (264) cmd ::= SYNCDB ids REPLICA */ - { 192, -7 }, /* (265) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 192, -7 }, /* (266) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 192, -7 }, /* (267) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ - { 192, -7 }, /* (268) cmd ::= ALTER 
TABLE ids cpxName ADD TAG columnlist */ - { 192, -7 }, /* (269) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 192, -8 }, /* (270) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 192, -9 }, /* (271) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 192, -7 }, /* (272) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ - { 192, -7 }, /* (273) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 192, -7 }, /* (274) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 192, -7 }, /* (275) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ - { 192, -7 }, /* (276) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 192, -7 }, /* (277) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 192, -8 }, /* (278) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 192, -9 }, /* (279) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ - { 192, -7 }, /* (280) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ - { 192, -3 }, /* (281) cmd ::= KILL CONNECTION INTEGER */ - { 192, -5 }, /* (282) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 192, -5 }, /* (283) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 196, -1 }, /* (0) program ::= cmd */ + { 197, -2 }, /* (1) cmd ::= SHOW DATABASES */ + { 197, -2 }, /* (2) cmd ::= SHOW TOPICS */ + { 197, -2 }, /* (3) cmd ::= SHOW FUNCTIONS */ + { 197, -2 }, /* (4) cmd ::= SHOW MNODES */ + { 197, -2 }, /* (5) cmd ::= SHOW DNODES */ + { 197, -2 }, /* (6) cmd ::= SHOW ACCOUNTS */ + { 197, -2 }, /* (7) cmd ::= SHOW USERS */ + { 197, -2 }, /* (8) cmd ::= SHOW MODULES */ + { 197, -2 }, /* (9) cmd ::= SHOW QUERIES */ + { 197, -2 }, /* (10) cmd ::= SHOW CONNECTIONS */ + { 197, -2 }, /* (11) cmd ::= SHOW STREAMS */ + { 197, -2 }, /* (12) cmd ::= SHOW VARIABLES */ + { 197, -2 }, /* (13) cmd ::= SHOW SCORES */ + { 197, -2 }, /* (14) cmd ::= SHOW GRANTS */ + { 197, -2 }, /* (15) cmd ::= SHOW VNODES */ + { 197, -3 }, /* (16) cmd ::= SHOW VNODES IPTOKEN */ + { 198, 0 }, /* (17) dbPrefix ::= */ + { 198, -2 }, /* (18) dbPrefix ::= ids DOT */ + { 200, 0 }, /* (19) cpxName ::= */ + { 200, -2 }, /* (20) cpxName ::= DOT ids */ + { 197, -5 }, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */ + { 197, -5 }, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */ + { 197, -4 }, /* (23) cmd ::= SHOW CREATE DATABASE ids */ + { 197, -3 }, /* (24) cmd ::= SHOW dbPrefix TABLES */ + { 197, -5 }, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 197, -3 }, /* (26) cmd ::= SHOW dbPrefix STABLES */ + { 197, -5 }, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + { 197, -3 }, /* (28) cmd ::= SHOW dbPrefix VGROUPS */ + { 197, -4 }, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 197, -5 }, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */ + { 197, -5 }, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */ + { 197, -4 }, /* (32) cmd ::= DROP DATABASE ifexists ids */ + { 197, -4 }, /* (33) cmd ::= DROP TOPIC ifexists ids */ + { 197, -3 }, /* (34) cmd ::= DROP FUNCTION ids */ + { 197, -3 }, /* (35) cmd ::= DROP DNODE ids */ + { 197, -3 }, /* (36) cmd ::= DROP USER ids */ + { 197, -3 }, /* (37) cmd ::= DROP ACCOUNT ids */ + { 197, -2 }, /* (38) cmd ::= USE ids */ + { 197, -3 }, /* (39) cmd ::= DESCRIBE ids cpxName */ + { 197, -5 }, /* (40) cmd ::= ALTER USER ids PASS ids */ + { 197, -5 }, /* (41) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 197, -4 }, /* (42) cmd ::= ALTER DNODE ids ids */ + { 197, -5 }, /* (43) cmd ::= ALTER DNODE ids ids ids */ + { 197, -3 }, /* (44) cmd ::= ALTER LOCAL ids */ + { 197, -4 }, /* (45) cmd 
::= ALTER LOCAL ids ids */ + { 197, -4 }, /* (46) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 197, -4 }, /* (47) cmd ::= ALTER TOPIC ids alter_topic_optr */ + { 197, -4 }, /* (48) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 197, -6 }, /* (49) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 197, -6 }, /* (50) cmd ::= COMPACT VNODES IN LP exprlist RP */ + { 199, -1 }, /* (51) ids ::= ID */ + { 199, -1 }, /* (52) ids ::= STRING */ + { 201, -2 }, /* (53) ifexists ::= IF EXISTS */ + { 201, 0 }, /* (54) ifexists ::= */ + { 206, -3 }, /* (55) ifnotexists ::= IF NOT EXISTS */ + { 206, 0 }, /* (56) ifnotexists ::= */ + { 197, -3 }, /* (57) cmd ::= CREATE DNODE ids */ + { 197, -6 }, /* (58) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 197, -5 }, /* (59) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 197, -5 }, /* (60) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + { 197, -8 }, /* (61) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + { 197, -9 }, /* (62) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ + { 197, -5 }, /* (63) cmd ::= CREATE USER ids PASS ids */ + { 210, 0 }, /* (64) bufsize ::= */ + { 210, -2 }, /* (65) bufsize ::= BUFSIZE INTEGER */ + { 211, 0 }, /* (66) pps ::= */ + { 211, -2 }, /* (67) pps ::= PPS INTEGER */ + { 212, 0 }, /* (68) tseries ::= */ + { 212, -2 }, /* (69) tseries ::= TSERIES INTEGER */ + { 213, 0 }, /* (70) dbs ::= */ + { 213, -2 }, /* (71) dbs ::= DBS INTEGER */ + { 214, 0 }, /* (72) streams ::= */ + { 214, -2 }, /* (73) streams ::= STREAMS INTEGER */ + { 215, 0 }, /* (74) storage ::= */ + { 215, -2 }, /* (75) storage ::= STORAGE INTEGER */ + { 216, 0 }, /* (76) qtime ::= */ + { 216, -2 }, /* (77) qtime ::= QTIME INTEGER */ + { 217, 0 }, /* (78) users ::= */ + { 217, -2 }, /* (79) users ::= USERS INTEGER */ + { 218, 0 }, /* (80) conns ::= */ + { 218, -2 }, /* (81) conns ::= CONNS INTEGER */ + { 219, 0 }, /* (82) state ::= */ + { 219, -2 }, /* (83) state ::= STATE ids */ + { 204, -9 }, /* (84) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + { 220, -3 }, /* (85) intitemlist ::= intitemlist COMMA intitem */ + { 220, -1 }, /* (86) intitemlist ::= intitem */ + { 221, -1 }, /* (87) intitem ::= INTEGER */ + { 222, -2 }, /* (88) keep ::= KEEP intitemlist */ + { 223, -2 }, /* (89) cache ::= CACHE INTEGER */ + { 224, -2 }, /* (90) replica ::= REPLICA INTEGER */ + { 225, -2 }, /* (91) quorum ::= QUORUM INTEGER */ + { 226, -2 }, /* (92) days ::= DAYS INTEGER */ + { 227, -2 }, /* (93) minrows ::= MINROWS INTEGER */ + { 228, -2 }, /* (94) maxrows ::= MAXROWS INTEGER */ + { 229, -2 }, /* (95) blocks ::= BLOCKS INTEGER */ + { 230, -2 }, /* (96) ctime ::= CTIME INTEGER */ + { 231, -2 }, /* (97) wal ::= WAL INTEGER */ + { 232, -2 }, /* (98) fsync ::= FSYNC INTEGER */ + { 233, -2 }, /* (99) comp ::= COMP INTEGER */ + { 234, -2 }, /* (100) prec ::= PRECISION STRING */ + { 235, -2 }, /* (101) update ::= UPDATE INTEGER */ + { 236, -2 }, /* (102) cachelast ::= CACHELAST INTEGER */ + { 237, -2 }, /* (103) partitions ::= PARTITIONS INTEGER */ + { 207, 0 }, /* (104) db_optr ::= */ + { 207, -2 }, /* (105) db_optr ::= db_optr cache */ + { 207, -2 }, /* (106) db_optr ::= db_optr replica */ + { 207, -2 }, /* (107) db_optr ::= db_optr quorum */ + { 207, -2 }, /* (108) db_optr ::= db_optr days */ + { 207, -2 }, /* (109) db_optr ::= db_optr minrows */ + { 207, -2 }, /* (110) db_optr ::= db_optr maxrows */ + { 207, -2 }, /* (111) db_optr ::= db_optr blocks */ + { 207, -2 }, /* (112) db_optr ::= db_optr 
ctime */ + { 207, -2 }, /* (113) db_optr ::= db_optr wal */ + { 207, -2 }, /* (114) db_optr ::= db_optr fsync */ + { 207, -2 }, /* (115) db_optr ::= db_optr comp */ + { 207, -2 }, /* (116) db_optr ::= db_optr prec */ + { 207, -2 }, /* (117) db_optr ::= db_optr keep */ + { 207, -2 }, /* (118) db_optr ::= db_optr update */ + { 207, -2 }, /* (119) db_optr ::= db_optr cachelast */ + { 208, -1 }, /* (120) topic_optr ::= db_optr */ + { 208, -2 }, /* (121) topic_optr ::= topic_optr partitions */ + { 202, 0 }, /* (122) alter_db_optr ::= */ + { 202, -2 }, /* (123) alter_db_optr ::= alter_db_optr replica */ + { 202, -2 }, /* (124) alter_db_optr ::= alter_db_optr quorum */ + { 202, -2 }, /* (125) alter_db_optr ::= alter_db_optr keep */ + { 202, -2 }, /* (126) alter_db_optr ::= alter_db_optr blocks */ + { 202, -2 }, /* (127) alter_db_optr ::= alter_db_optr comp */ + { 202, -2 }, /* (128) alter_db_optr ::= alter_db_optr update */ + { 202, -2 }, /* (129) alter_db_optr ::= alter_db_optr cachelast */ + { 203, -1 }, /* (130) alter_topic_optr ::= alter_db_optr */ + { 203, -2 }, /* (131) alter_topic_optr ::= alter_topic_optr partitions */ + { 209, -1 }, /* (132) typename ::= ids */ + { 209, -4 }, /* (133) typename ::= ids LP signed RP */ + { 209, -2 }, /* (134) typename ::= ids UNSIGNED */ + { 238, -1 }, /* (135) signed ::= INTEGER */ + { 238, -2 }, /* (136) signed ::= PLUS INTEGER */ + { 238, -2 }, /* (137) signed ::= MINUS INTEGER */ + { 197, -3 }, /* (138) cmd ::= CREATE TABLE create_table_args */ + { 197, -3 }, /* (139) cmd ::= CREATE TABLE create_stable_args */ + { 197, -3 }, /* (140) cmd ::= CREATE STABLE create_stable_args */ + { 197, -3 }, /* (141) cmd ::= CREATE TABLE create_table_list */ + { 241, -1 }, /* (142) create_table_list ::= create_from_stable */ + { 241, -2 }, /* (143) create_table_list ::= create_table_list create_from_stable */ + { 239, -6 }, /* (144) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + { 240, -10 }, /* (145) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + { 242, -10 }, /* (146) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + { 242, -13 }, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + { 245, -3 }, /* (148) tagNamelist ::= tagNamelist COMMA ids */ + { 245, -1 }, /* (149) tagNamelist ::= ids */ + { 239, -5 }, /* (150) create_table_args ::= ifnotexists ids cpxName AS select */ + { 243, -3 }, /* (151) columnlist ::= columnlist COMMA column */ + { 243, -1 }, /* (152) columnlist ::= column */ + { 247, -2 }, /* (153) column ::= ids typename */ + { 244, -3 }, /* (154) tagitemlist ::= tagitemlist COMMA tagitem */ + { 244, -1 }, /* (155) tagitemlist ::= tagitem */ + { 248, -1 }, /* (156) tagitem ::= INTEGER */ + { 248, -1 }, /* (157) tagitem ::= FLOAT */ + { 248, -1 }, /* (158) tagitem ::= STRING */ + { 248, -1 }, /* (159) tagitem ::= BOOL */ + { 248, -1 }, /* (160) tagitem ::= NULL */ + { 248, -1 }, /* (161) tagitem ::= NOW */ + { 248, -2 }, /* (162) tagitem ::= MINUS INTEGER */ + { 248, -2 }, /* (163) tagitem ::= MINUS FLOAT */ + { 248, -2 }, /* (164) tagitem ::= PLUS INTEGER */ + { 248, -2 }, /* (165) tagitem ::= PLUS FLOAT */ + { 246, -14 }, /* (166) select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ + { 246, -3 }, /* (167) select ::= LP select RP */ + { 262, -1 }, /* (168) union ::= 
select */ + { 262, -4 }, /* (169) union ::= union UNION ALL select */ + { 197, -1 }, /* (170) cmd ::= union */ + { 246, -2 }, /* (171) select ::= SELECT selcollist */ + { 263, -2 }, /* (172) sclp ::= selcollist COMMA */ + { 263, 0 }, /* (173) sclp ::= */ + { 249, -4 }, /* (174) selcollist ::= sclp distinct expr as */ + { 249, -2 }, /* (175) selcollist ::= sclp STAR */ + { 266, -2 }, /* (176) as ::= AS ids */ + { 266, -1 }, /* (177) as ::= ids */ + { 266, 0 }, /* (178) as ::= */ + { 264, -1 }, /* (179) distinct ::= DISTINCT */ + { 264, 0 }, /* (180) distinct ::= */ + { 250, -2 }, /* (181) from ::= FROM tablelist */ + { 250, -2 }, /* (182) from ::= FROM sub */ + { 268, -3 }, /* (183) sub ::= LP union RP */ + { 268, -4 }, /* (184) sub ::= LP union RP ids */ + { 268, -6 }, /* (185) sub ::= sub COMMA LP union RP ids */ + { 267, -2 }, /* (186) tablelist ::= ids cpxName */ + { 267, -3 }, /* (187) tablelist ::= ids cpxName ids */ + { 267, -4 }, /* (188) tablelist ::= tablelist COMMA ids cpxName */ + { 267, -5 }, /* (189) tablelist ::= tablelist COMMA ids cpxName ids */ + { 269, -1 }, /* (190) tmvar ::= VARIABLE */ + { 252, -4 }, /* (191) interval_opt ::= INTERVAL LP tmvar RP */ + { 252, -6 }, /* (192) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + { 252, 0 }, /* (193) interval_opt ::= */ + { 253, 0 }, /* (194) session_option ::= */ + { 253, -7 }, /* (195) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + { 254, 0 }, /* (196) windowstate_option ::= */ + { 254, -4 }, /* (197) windowstate_option ::= STATE_WINDOW LP ids RP */ + { 255, 0 }, /* (198) fill_opt ::= */ + { 255, -6 }, /* (199) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 255, -4 }, /* (200) fill_opt ::= FILL LP ID RP */ + { 256, -4 }, /* (201) sliding_opt ::= SLIDING LP tmvar RP */ + { 256, 0 }, /* (202) sliding_opt ::= */ + { 259, 0 }, /* (203) orderby_opt ::= */ + { 259, -3 }, /* (204) orderby_opt ::= ORDER BY sortlist */ + { 270, -4 }, /* (205) sortlist ::= sortlist COMMA item sortorder */ + { 270, -2 }, /* (206) sortlist ::= item sortorder */ + { 272, -2 }, /* (207) item ::= ids cpxName */ + { 273, -1 }, /* (208) sortorder ::= ASC */ + { 273, -1 }, /* (209) sortorder ::= DESC */ + { 273, 0 }, /* (210) sortorder ::= */ + { 257, 0 }, /* (211) groupby_opt ::= */ + { 257, -3 }, /* (212) groupby_opt ::= GROUP BY grouplist */ + { 274, -3 }, /* (213) grouplist ::= grouplist COMMA item */ + { 274, -1 }, /* (214) grouplist ::= item */ + { 258, 0 }, /* (215) having_opt ::= */ + { 258, -2 }, /* (216) having_opt ::= HAVING expr */ + { 261, 0 }, /* (217) limit_opt ::= */ + { 261, -2 }, /* (218) limit_opt ::= LIMIT signed */ + { 261, -4 }, /* (219) limit_opt ::= LIMIT signed OFFSET signed */ + { 261, -4 }, /* (220) limit_opt ::= LIMIT signed COMMA signed */ + { 260, 0 }, /* (221) slimit_opt ::= */ + { 260, -2 }, /* (222) slimit_opt ::= SLIMIT signed */ + { 260, -4 }, /* (223) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 260, -4 }, /* (224) slimit_opt ::= SLIMIT signed COMMA signed */ + { 251, 0 }, /* (225) where_opt ::= */ + { 251, -2 }, /* (226) where_opt ::= WHERE expr */ + { 265, -3 }, /* (227) expr ::= LP expr RP */ + { 265, -1 }, /* (228) expr ::= ID */ + { 265, -3 }, /* (229) expr ::= ID DOT ID */ + { 265, -3 }, /* (230) expr ::= ID DOT STAR */ + { 265, -1 }, /* (231) expr ::= INTEGER */ + { 265, -2 }, /* (232) expr ::= MINUS INTEGER */ + { 265, -2 }, /* (233) expr ::= PLUS INTEGER */ + { 265, -1 }, /* (234) expr ::= FLOAT */ + { 265, -2 }, /* (235) expr ::= MINUS FLOAT */ + { 265, -2 }, /* (236) expr ::= PLUS 
FLOAT */ + { 265, -1 }, /* (237) expr ::= STRING */ + { 265, -1 }, /* (238) expr ::= NOW */ + { 265, -1 }, /* (239) expr ::= VARIABLE */ + { 265, -2 }, /* (240) expr ::= PLUS VARIABLE */ + { 265, -2 }, /* (241) expr ::= MINUS VARIABLE */ + { 265, -1 }, /* (242) expr ::= BOOL */ + { 265, -1 }, /* (243) expr ::= NULL */ + { 265, -4 }, /* (244) expr ::= ID LP exprlist RP */ + { 265, -4 }, /* (245) expr ::= ID LP STAR RP */ + { 265, -3 }, /* (246) expr ::= expr IS NULL */ + { 265, -4 }, /* (247) expr ::= expr IS NOT NULL */ + { 265, -3 }, /* (248) expr ::= expr LT expr */ + { 265, -3 }, /* (249) expr ::= expr GT expr */ + { 265, -3 }, /* (250) expr ::= expr LE expr */ + { 265, -3 }, /* (251) expr ::= expr GE expr */ + { 265, -3 }, /* (252) expr ::= expr NE expr */ + { 265, -3 }, /* (253) expr ::= expr EQ expr */ + { 265, -5 }, /* (254) expr ::= expr BETWEEN expr AND expr */ + { 265, -3 }, /* (255) expr ::= expr AND expr */ + { 265, -3 }, /* (256) expr ::= expr OR expr */ + { 265, -3 }, /* (257) expr ::= expr PLUS expr */ + { 265, -3 }, /* (258) expr ::= expr MINUS expr */ + { 265, -3 }, /* (259) expr ::= expr STAR expr */ + { 265, -3 }, /* (260) expr ::= expr SLASH expr */ + { 265, -3 }, /* (261) expr ::= expr REM expr */ + { 265, -3 }, /* (262) expr ::= expr LIKE expr */ + { 265, -5 }, /* (263) expr ::= expr IN LP exprlist RP */ + { 205, -3 }, /* (264) exprlist ::= exprlist COMMA expritem */ + { 205, -1 }, /* (265) exprlist ::= expritem */ + { 275, -1 }, /* (266) expritem ::= expr */ + { 275, 0 }, /* (267) expritem ::= */ + { 197, -3 }, /* (268) cmd ::= RESET QUERY CACHE */ + { 197, -3 }, /* (269) cmd ::= SYNCDB ids REPLICA */ + { 197, -7 }, /* (270) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 197, -7 }, /* (271) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 197, -7 }, /* (272) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + { 197, -7 }, /* (273) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 197, -7 }, /* (274) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 197, -8 }, /* (275) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 197, -9 }, /* (276) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 197, -7 }, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ + { 197, -7 }, /* (278) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + { 197, -7 }, /* (279) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + { 197, -7 }, /* (280) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + { 197, -7 }, /* (281) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + { 197, -7 }, /* (282) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + { 197, -8 }, /* (283) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + { 197, -9 }, /* (284) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + { 197, -7 }, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + { 197, -3 }, /* (286) cmd ::= KILL CONNECTION INTEGER */ + { 197, -5 }, /* (287) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 197, -5 }, /* (288) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2164,9 +2194,9 @@ static void yy_reduce( /********** Begin reduce actions **********************************************/ YYMINORTYPE yylhsminor; case 0: /* program ::= cmd */ - case 134: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==134); - case 135: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==135); - case 136: /* cmd ::= 
CREATE STABLE create_stable_args */ yytestcase(yyruleno==136); + case 138: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==138); + case 139: /* cmd ::= CREATE TABLE create_stable_args */ yytestcase(yyruleno==139); + case 140: /* cmd ::= CREATE STABLE create_stable_args */ yytestcase(yyruleno==140); {} break; case 1: /* cmd ::= SHOW DATABASES */ @@ -2175,500 +2205,516 @@ static void yy_reduce( case 2: /* cmd ::= SHOW TOPICS */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_TP, 0, 0);} break; - case 3: /* cmd ::= SHOW MNODES */ + case 3: /* cmd ::= SHOW FUNCTIONS */ +{ setShowOptions(pInfo, TSDB_MGMT_TABLE_FUNCTION, 0, 0);} + break; + case 4: /* cmd ::= SHOW MNODES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_MNODE, 0, 0);} break; - case 4: /* cmd ::= SHOW DNODES */ + case 5: /* cmd ::= SHOW DNODES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_DNODE, 0, 0);} break; - case 5: /* cmd ::= SHOW ACCOUNTS */ + case 6: /* cmd ::= SHOW ACCOUNTS */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_ACCT, 0, 0);} break; - case 6: /* cmd ::= SHOW USERS */ + case 7: /* cmd ::= SHOW USERS */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_USER, 0, 0);} break; - case 7: /* cmd ::= SHOW MODULES */ + case 8: /* cmd ::= SHOW MODULES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_MODULE, 0, 0); } break; - case 8: /* cmd ::= SHOW QUERIES */ + case 9: /* cmd ::= SHOW QUERIES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_QUERIES, 0, 0); } break; - case 9: /* cmd ::= SHOW CONNECTIONS */ + case 10: /* cmd ::= SHOW CONNECTIONS */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_CONNS, 0, 0);} break; - case 10: /* cmd ::= SHOW STREAMS */ + case 11: /* cmd ::= SHOW STREAMS */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_STREAMS, 0, 0); } break; - case 11: /* cmd ::= SHOW VARIABLES */ + case 12: /* cmd ::= SHOW VARIABLES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_VARIABLES, 0, 0); } break; - case 12: /* cmd ::= SHOW SCORES */ + case 13: /* cmd ::= SHOW SCORES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_SCORES, 0, 0); } break; - case 13: /* cmd ::= SHOW GRANTS */ + case 14: /* cmd ::= SHOW GRANTS */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_GRANTS, 0, 0); } break; - case 14: /* cmd ::= SHOW VNODES */ + case 15: /* cmd ::= SHOW VNODES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, 0, 0); } break; - case 15: /* cmd ::= SHOW VNODES IPTOKEN */ + case 16: /* cmd ::= SHOW VNODES IPTOKEN */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_VNODES, &yymsp[0].minor.yy0, 0); } break; - case 16: /* dbPrefix ::= */ + case 17: /* dbPrefix ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.type = 0;} break; - case 17: /* dbPrefix ::= ids DOT */ + case 18: /* dbPrefix ::= ids DOT */ {yylhsminor.yy0 = yymsp[-1].minor.yy0; } yymsp[-1].minor.yy0 = yylhsminor.yy0; break; - case 18: /* cpxName ::= */ + case 19: /* cpxName ::= */ {yymsp[1].minor.yy0.n = 0; } break; - case 19: /* cpxName ::= DOT ids */ + case 20: /* cpxName ::= DOT ids */ {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n += 1; } break; - case 20: /* cmd ::= SHOW CREATE TABLE ids cpxName */ + case 21: /* cmd ::= SHOW CREATE TABLE ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 21: /* cmd ::= SHOW CREATE STABLE ids cpxName */ + case 22: /* cmd ::= SHOW CREATE STABLE ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_STABLE, 1, &yymsp[-1].minor.yy0); } break; - case 22: /* cmd ::= SHOW CREATE DATABASE ids */ + case 23: /* cmd ::= SHOW CREATE 
DATABASE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_DATABASE, 1, &yymsp[0].minor.yy0); } break; - case 23: /* cmd ::= SHOW dbPrefix TABLES */ + case 24: /* cmd ::= SHOW dbPrefix TABLES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-1].minor.yy0, 0); } break; - case 24: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ + case 25: /* cmd ::= SHOW dbPrefix TABLES LIKE ids */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_TABLE, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0); } break; - case 25: /* cmd ::= SHOW dbPrefix STABLES */ + case 26: /* cmd ::= SHOW dbPrefix STABLES */ { setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &yymsp[-1].minor.yy0, 0); } break; - case 26: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ + case 27: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */ { SStrToken token; tSetDbName(&token, &yymsp[-3].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } break; - case 27: /* cmd ::= SHOW dbPrefix VGROUPS */ + case 28: /* cmd ::= SHOW dbPrefix VGROUPS */ { SStrToken token; tSetDbName(&token, &yymsp[-1].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } break; - case 28: /* cmd ::= SHOW dbPrefix VGROUPS ids */ + case 29: /* cmd ::= SHOW dbPrefix VGROUPS ids */ { SStrToken token; tSetDbName(&token, &yymsp[-2].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } break; - case 29: /* cmd ::= DROP TABLE ifexists ids cpxName */ + case 30: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, -1); } break; - case 30: /* cmd ::= DROP STABLE ifexists ids cpxName */ + case 31: /* cmd ::= DROP STABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0, -1, TSDB_SUPER_TABLE); } break; - case 31: /* cmd ::= DROP DATABASE ifexists ids */ + case 32: /* cmd ::= DROP DATABASE ifexists ids */ { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_DEFAULT, -1); } break; - case 32: /* cmd ::= DROP TOPIC ifexists ids */ + case 33: /* cmd ::= DROP TOPIC ifexists ids */ { setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0, TSDB_DB_TYPE_TOPIC, -1); } break; - case 33: /* cmd ::= DROP DNODE ids */ + case 34: /* cmd ::= DROP FUNCTION ids */ +{ setDropFuncInfo(pInfo, TSDB_SQL_DROP_FUNCTION, &yymsp[0].minor.yy0); } + break; + case 35: /* cmd ::= DROP DNODE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } break; - case 34: /* cmd ::= DROP USER ids */ + case 36: /* cmd ::= DROP USER ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_USER, 1, &yymsp[0].minor.yy0); } break; - case 35: /* cmd ::= DROP ACCOUNT ids */ + case 37: /* cmd ::= DROP ACCOUNT ids */ { setDCLSqlElems(pInfo, TSDB_SQL_DROP_ACCT, 1, &yymsp[0].minor.yy0); } break; - case 36: /* cmd ::= USE ids */ + case 38: /* cmd ::= USE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_USE_DB, 1, &yymsp[0].minor.yy0);} break; - case 37: /* cmd ::= DESCRIBE ids cpxName */ + case 39: /* cmd ::= DESCRIBE ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; setDCLSqlElems(pInfo, TSDB_SQL_DESCRIBE_TABLE, 1, &yymsp[-1].minor.yy0); } break; - case 38: /* cmd ::= ALTER USER ids PASS ids */ + case 40: /* cmd ::= ALTER USER ids PASS ids */ { setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, 
&yymsp[0].minor.yy0, NULL); } break; - case 39: /* cmd ::= ALTER USER ids PRIVILEGE ids */ + case 41: /* cmd ::= ALTER USER ids PRIVILEGE ids */ { setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} break; - case 40: /* cmd ::= ALTER DNODE ids ids */ + case 42: /* cmd ::= ALTER DNODE ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 41: /* cmd ::= ALTER DNODE ids ids ids */ + case 43: /* cmd ::= ALTER DNODE ids ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_DNODE, 3, &yymsp[-2].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 42: /* cmd ::= ALTER LOCAL ids */ + case 44: /* cmd ::= ALTER LOCAL ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 1, &yymsp[0].minor.yy0); } break; - case 43: /* cmd ::= ALTER LOCAL ids ids */ + case 45: /* cmd ::= ALTER LOCAL ids ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; - case 44: /* cmd ::= ALTER DATABASE ids alter_db_optr */ - case 45: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==45); -{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy382, &t);} + case 46: /* cmd ::= ALTER DATABASE ids alter_db_optr */ + case 47: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==47); +{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy214, &t);} break; - case 46: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy151);} + case 48: /* cmd ::= ALTER ACCOUNT ids acct_optr */ +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy547);} break; - case 47: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy151);} + case 49: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy547);} break; - case 48: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ -{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy441);} + case 50: /* cmd ::= COMPACT VNODES IN LP exprlist RP */ +{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy525);} break; - case 49: /* ids ::= ID */ - case 50: /* ids ::= STRING */ yytestcase(yyruleno==50); + case 51: /* ids ::= ID */ + case 52: /* ids ::= STRING */ yytestcase(yyruleno==52); {yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 51: /* ifexists ::= IF EXISTS */ + case 53: /* ifexists ::= IF EXISTS */ { yymsp[-1].minor.yy0.n = 1;} break; - case 52: /* ifexists ::= */ - case 54: /* ifnotexists ::= */ yytestcase(yyruleno==54); - case 175: /* distinct ::= */ yytestcase(yyruleno==175); + case 54: /* ifexists ::= */ + case 56: /* ifnotexists ::= */ yytestcase(yyruleno==56); + case 180: /* distinct ::= */ yytestcase(yyruleno==180); { yymsp[1].minor.yy0.n = 0;} break; - case 53: /* ifnotexists ::= IF NOT EXISTS */ + case 55: /* ifnotexists ::= IF NOT EXISTS */ { yymsp[-2].minor.yy0.n = 1;} break; - case 55: /* cmd ::= CREATE DNODE ids */ + case 57: /* cmd ::= CREATE DNODE ids */ { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; - case 56: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr 
*/ -{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy151);} + case 58: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ +{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy547);} + break; + case 59: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + case 60: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==60); +{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy214, &yymsp[-2].minor.yy0);} + break; + case 61: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ +{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy31, &yymsp[0].minor.yy0, 1);} break; - case 57: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - case 58: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==58); -{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy382, &yymsp[-2].minor.yy0);} + case 62: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */ +{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy31, &yymsp[0].minor.yy0, 2);} break; - case 59: /* cmd ::= CREATE USER ids PASS ids */ + case 63: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; - case 60: /* pps ::= */ - case 62: /* tseries ::= */ yytestcase(yyruleno==62); - case 64: /* dbs ::= */ yytestcase(yyruleno==64); - case 66: /* streams ::= */ yytestcase(yyruleno==66); - case 68: /* storage ::= */ yytestcase(yyruleno==68); - case 70: /* qtime ::= */ yytestcase(yyruleno==70); - case 72: /* users ::= */ yytestcase(yyruleno==72); - case 74: /* conns ::= */ yytestcase(yyruleno==74); - case 76: /* state ::= */ yytestcase(yyruleno==76); + case 64: /* bufsize ::= */ + case 66: /* pps ::= */ yytestcase(yyruleno==66); + case 68: /* tseries ::= */ yytestcase(yyruleno==68); + case 70: /* dbs ::= */ yytestcase(yyruleno==70); + case 72: /* streams ::= */ yytestcase(yyruleno==72); + case 74: /* storage ::= */ yytestcase(yyruleno==74); + case 76: /* qtime ::= */ yytestcase(yyruleno==76); + case 78: /* users ::= */ yytestcase(yyruleno==78); + case 80: /* conns ::= */ yytestcase(yyruleno==80); + case 82: /* state ::= */ yytestcase(yyruleno==82); { yymsp[1].minor.yy0.n = 0; } break; - case 61: /* pps ::= PPS INTEGER */ - case 63: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==63); - case 65: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==65); - case 67: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==67); - case 69: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==69); - case 71: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==71); - case 73: /* users ::= USERS INTEGER */ yytestcase(yyruleno==73); - case 75: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==75); - case 77: /* state ::= STATE ids */ yytestcase(yyruleno==77); + case 65: /* bufsize ::= BUFSIZE INTEGER */ + case 67: /* pps ::= PPS INTEGER */ yytestcase(yyruleno==67); + case 69: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==69); + case 71: /* dbs ::= DBS INTEGER */ yytestcase(yyruleno==71); + case 73: /* streams ::= STREAMS INTEGER */ yytestcase(yyruleno==73); + case 75: /* storage ::= STORAGE INTEGER */ yytestcase(yyruleno==75); + case 77: /* qtime ::= QTIME INTEGER */ yytestcase(yyruleno==77); + case 79: 
/* users ::= USERS INTEGER */ yytestcase(yyruleno==79); + case 81: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==81); + case 83: /* state ::= STATE ids */ yytestcase(yyruleno==83); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 78: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ -{ - yylhsminor.yy151.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy151.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy151.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy151.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy151.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy151.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy151.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy151.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy151.stat = yymsp[0].minor.yy0; -} - yymsp[-8].minor.yy151 = yylhsminor.yy151; - break; - case 79: /* intitemlist ::= intitemlist COMMA intitem */ - case 150: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==150); -{ yylhsminor.yy441 = tVariantListAppend(yymsp[-2].minor.yy441, &yymsp[0].minor.yy506, -1); } - yymsp[-2].minor.yy441 = yylhsminor.yy441; - break; - case 80: /* intitemlist ::= intitem */ - case 151: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==151); -{ yylhsminor.yy441 = tVariantListAppend(NULL, &yymsp[0].minor.yy506, -1); } - yymsp[0].minor.yy441 = yylhsminor.yy441; - break; - case 81: /* intitem ::= INTEGER */ - case 152: /* tagitem ::= INTEGER */ yytestcase(yyruleno==152); - case 153: /* tagitem ::= FLOAT */ yytestcase(yyruleno==153); - case 154: /* tagitem ::= STRING */ yytestcase(yyruleno==154); - case 155: /* tagitem ::= BOOL */ yytestcase(yyruleno==155); + case 84: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ +{ + yylhsminor.yy547.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy547.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy547.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy547.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy547.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy547.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy547.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy547.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy547.stat = yymsp[0].minor.yy0; +} + yymsp[-8].minor.yy547 = yylhsminor.yy547; + break; + case 85: /* intitemlist ::= intitemlist COMMA intitem */ + case 154: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==154); +{ yylhsminor.yy525 = tVariantListAppend(yymsp[-2].minor.yy525, &yymsp[0].minor.yy506, -1); } + yymsp[-2].minor.yy525 = yylhsminor.yy525; + break; + case 86: /* intitemlist ::= intitem */ + case 155: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==155); +{ yylhsminor.yy525 = tVariantListAppend(NULL, &yymsp[0].minor.yy506, -1); } + yymsp[0].minor.yy525 = yylhsminor.yy525; + break; + case 87: /* intitem ::= INTEGER */ + case 156: /* tagitem 
::= INTEGER */ yytestcase(yyruleno==156); + case 157: /* tagitem ::= FLOAT */ yytestcase(yyruleno==157); + case 158: /* tagitem ::= STRING */ yytestcase(yyruleno==158); + case 159: /* tagitem ::= BOOL */ yytestcase(yyruleno==159); { toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy506, &yymsp[0].minor.yy0); } yymsp[0].minor.yy506 = yylhsminor.yy506; break; - case 82: /* keep ::= KEEP intitemlist */ -{ yymsp[-1].minor.yy441 = yymsp[0].minor.yy441; } - break; - case 83: /* cache ::= CACHE INTEGER */ - case 84: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==84); - case 85: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==85); - case 86: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==86); - case 87: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==87); - case 88: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==88); - case 89: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==89); - case 90: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==90); - case 91: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==91); - case 92: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==92); - case 93: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==93); - case 94: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==94); - case 95: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==95); - case 96: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==96); - case 97: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==97); + case 88: /* keep ::= KEEP intitemlist */ +{ yymsp[-1].minor.yy525 = yymsp[0].minor.yy525; } + break; + case 89: /* cache ::= CACHE INTEGER */ + case 90: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==90); + case 91: /* quorum ::= QUORUM INTEGER */ yytestcase(yyruleno==91); + case 92: /* days ::= DAYS INTEGER */ yytestcase(yyruleno==92); + case 93: /* minrows ::= MINROWS INTEGER */ yytestcase(yyruleno==93); + case 94: /* maxrows ::= MAXROWS INTEGER */ yytestcase(yyruleno==94); + case 95: /* blocks ::= BLOCKS INTEGER */ yytestcase(yyruleno==95); + case 96: /* ctime ::= CTIME INTEGER */ yytestcase(yyruleno==96); + case 97: /* wal ::= WAL INTEGER */ yytestcase(yyruleno==97); + case 98: /* fsync ::= FSYNC INTEGER */ yytestcase(yyruleno==98); + case 99: /* comp ::= COMP INTEGER */ yytestcase(yyruleno==99); + case 100: /* prec ::= PRECISION STRING */ yytestcase(yyruleno==100); + case 101: /* update ::= UPDATE INTEGER */ yytestcase(yyruleno==101); + case 102: /* cachelast ::= CACHELAST INTEGER */ yytestcase(yyruleno==102); + case 103: /* partitions ::= PARTITIONS INTEGER */ yytestcase(yyruleno==103); { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 98: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy382); yymsp[1].minor.yy382.dbType = TSDB_DB_TYPE_DEFAULT;} - break; - case 99: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 100: /* db_optr ::= db_optr replica */ - case 117: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==117); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 101: /* db_optr ::= db_optr quorum */ - case 118: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==118); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 
10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 102: /* db_optr ::= db_optr days */ -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 103: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 104: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 105: /* db_optr ::= db_optr blocks */ - case 120: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==120); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 106: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 107: /* db_optr ::= db_optr wal */ - case 122: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==122); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 108: /* db_optr ::= db_optr fsync */ - case 123: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==123); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 109: /* db_optr ::= db_optr comp */ - case 121: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==121); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 110: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 111: /* db_optr ::= db_optr keep */ - case 119: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==119); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.keep = yymsp[0].minor.yy441; } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 112: /* db_optr ::= db_optr update */ - case 124: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==124); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 113: /* db_optr ::= db_optr cachelast */ - case 125: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==125); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 114: /* topic_optr ::= db_optr */ - case 126: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==126); -{ yylhsminor.yy382 = yymsp[0].minor.yy382; yylhsminor.yy382.dbType = TSDB_DB_TYPE_TOPIC; } - yymsp[0].minor.yy382 = yylhsminor.yy382; - break; - case 115: /* topic_optr ::= topic_optr partitions */ - case 127: /* alter_topic_optr ::= alter_topic_optr 
partitions */ yytestcase(yyruleno==127); -{ yylhsminor.yy382 = yymsp[-1].minor.yy382; yylhsminor.yy382.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy382 = yylhsminor.yy382; - break; - case 116: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yymsp[1].minor.yy382); yymsp[1].minor.yy382.dbType = TSDB_DB_TYPE_DEFAULT;} - break; - case 128: /* typename ::= ids */ + case 104: /* db_optr ::= */ +{setDefaultCreateDbOption(&yymsp[1].minor.yy214); yymsp[1].minor.yy214.dbType = TSDB_DB_TYPE_DEFAULT;} + break; + case 105: /* db_optr ::= db_optr cache */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 106: /* db_optr ::= db_optr replica */ + case 123: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==123); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 107: /* db_optr ::= db_optr quorum */ + case 124: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==124); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 108: /* db_optr ::= db_optr days */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 109: /* db_optr ::= db_optr minrows */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 110: /* db_optr ::= db_optr maxrows */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 111: /* db_optr ::= db_optr blocks */ + case 126: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==126); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 112: /* db_optr ::= db_optr ctime */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 113: /* db_optr ::= db_optr wal */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 114: /* db_optr ::= db_optr fsync */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 115: /* db_optr ::= db_optr comp */ + case 127: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==127); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 116: /* db_optr ::= db_optr prec */ +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 117: /* db_optr ::= db_optr keep */ + case 125: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==125); 
+{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.keep = yymsp[0].minor.yy525; } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 118: /* db_optr ::= db_optr update */ + case 128: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==128); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 119: /* db_optr ::= db_optr cachelast */ + case 129: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==129); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 120: /* topic_optr ::= db_optr */ + case 130: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==130); +{ yylhsminor.yy214 = yymsp[0].minor.yy214; yylhsminor.yy214.dbType = TSDB_DB_TYPE_TOPIC; } + yymsp[0].minor.yy214 = yylhsminor.yy214; + break; + case 121: /* topic_optr ::= topic_optr partitions */ + case 131: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==131); +{ yylhsminor.yy214 = yymsp[-1].minor.yy214; yylhsminor.yy214.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy214 = yylhsminor.yy214; + break; + case 122: /* alter_db_optr ::= */ +{ setDefaultCreateDbOption(&yymsp[1].minor.yy214); yymsp[1].minor.yy214.dbType = TSDB_DB_TYPE_DEFAULT;} + break; + case 132: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSetColumnType (&yylhsminor.yy343, &yymsp[0].minor.yy0); + tSetColumnType (&yylhsminor.yy31, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy343 = yylhsminor.yy343; + yymsp[0].minor.yy31 = yylhsminor.yy31; break; - case 129: /* typename ::= ids LP signed RP */ + case 133: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy369 <= 0) { + if (yymsp[-1].minor.yy501 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSetColumnType(&yylhsminor.yy343, &yymsp[-3].minor.yy0); + tSetColumnType(&yylhsminor.yy31, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy369; // negative value of name length - tSetColumnType(&yylhsminor.yy343, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy501; // negative value of name length + tSetColumnType(&yylhsminor.yy31, &yymsp[-3].minor.yy0); } } - yymsp[-3].minor.yy343 = yylhsminor.yy343; + yymsp[-3].minor.yy31 = yylhsminor.yy31; break; - case 130: /* typename ::= ids UNSIGNED */ + case 134: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); - tSetColumnType (&yylhsminor.yy343, &yymsp[-1].minor.yy0); + tSetColumnType (&yylhsminor.yy31, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy343 = yylhsminor.yy343; + yymsp[-1].minor.yy31 = yylhsminor.yy31; break; - case 131: /* signed ::= INTEGER */ -{ yylhsminor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy369 = yylhsminor.yy369; + case 135: /* signed ::= INTEGER */ +{ yylhsminor.yy501 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy501 = yylhsminor.yy501; break; - case 132: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy369 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + case 136: /* signed ::= PLUS INTEGER */ +{ yymsp[-1].minor.yy501 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; - case 133: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy369 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} + case 137: /* 
signed ::= MINUS INTEGER */ +{ yymsp[-1].minor.yy501 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 137: /* cmd ::= CREATE TABLE create_table_list */ -{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy182;} + case 141: /* cmd ::= CREATE TABLE create_table_list */ +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy158;} break; - case 138: /* create_table_list ::= create_from_stable */ + case 142: /* create_table_list ::= create_from_stable */ { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); - taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy456); + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy432); pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; - yylhsminor.yy182 = pCreateTable; + yylhsminor.yy158 = pCreateTable; } - yymsp[0].minor.yy182 = yylhsminor.yy182; + yymsp[0].minor.yy158 = yylhsminor.yy158; break; - case 139: /* create_table_list ::= create_table_list create_from_stable */ + case 143: /* create_table_list ::= create_table_list create_from_stable */ { - taosArrayPush(yymsp[-1].minor.yy182->childTableInfo, &yymsp[0].minor.yy456); - yylhsminor.yy182 = yymsp[-1].minor.yy182; + taosArrayPush(yymsp[-1].minor.yy158->childTableInfo, &yymsp[0].minor.yy432); + yylhsminor.yy158 = yymsp[-1].minor.yy158; } - yymsp[-1].minor.yy182 = yylhsminor.yy182; + yymsp[-1].minor.yy158 = yylhsminor.yy158; break; - case 140: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + case 144: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { - yylhsminor.yy182 = tSetCreateTableInfo(yymsp[-1].minor.yy441, NULL, NULL, TSQL_CREATE_TABLE); - setSqlInfo(pInfo, yylhsminor.yy182, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy158 = tSetCreateTableInfo(yymsp[-1].minor.yy525, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy158, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy182 = yylhsminor.yy182; + yymsp[-5].minor.yy158 = yylhsminor.yy158; break; - case 141: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + case 145: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { - yylhsminor.yy182 = tSetCreateTableInfo(yymsp[-5].minor.yy441, yymsp[-1].minor.yy441, NULL, TSQL_CREATE_STABLE); - setSqlInfo(pInfo, yylhsminor.yy182, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy158 = tSetCreateTableInfo(yymsp[-5].minor.yy525, yymsp[-1].minor.yy525, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy158, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy182 = yylhsminor.yy182; + yymsp[-9].minor.yy158 = yylhsminor.yy158; break; - case 142: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + case 146: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy456 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy441, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy432 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, 
yymsp[-1].minor.yy525, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy456 = yylhsminor.yy456; + yymsp[-9].minor.yy432 = yylhsminor.yy432; break; - case 143: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; - yylhsminor.yy456 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy441, yymsp[-1].minor.yy441, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); + yylhsminor.yy432 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy525, yymsp[-1].minor.yy525, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } - yymsp[-12].minor.yy456 = yylhsminor.yy456; + yymsp[-12].minor.yy432 = yylhsminor.yy432; break; - case 144: /* tagNamelist ::= tagNamelist COMMA ids */ -{taosArrayPush(yymsp[-2].minor.yy441, &yymsp[0].minor.yy0); yylhsminor.yy441 = yymsp[-2].minor.yy441; } - yymsp[-2].minor.yy441 = yylhsminor.yy441; + case 148: /* tagNamelist ::= tagNamelist COMMA ids */ +{taosArrayPush(yymsp[-2].minor.yy525, &yymsp[0].minor.yy0); yylhsminor.yy525 = yymsp[-2].minor.yy525; } + yymsp[-2].minor.yy525 = yylhsminor.yy525; break; - case 145: /* tagNamelist ::= ids */ -{yylhsminor.yy441 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy441, &yymsp[0].minor.yy0);} - yymsp[0].minor.yy441 = yylhsminor.yy441; + case 149: /* tagNamelist ::= ids */ +{yylhsminor.yy525 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy525, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy525 = yylhsminor.yy525; break; - case 146: /* create_table_args ::= ifnotexists ids cpxName AS select */ + case 150: /* create_table_args ::= ifnotexists ids cpxName AS select */ { - yylhsminor.yy182 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy236, TSQL_CREATE_STREAM); - setSqlInfo(pInfo, yylhsminor.yy182, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy158 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy464, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy158, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy182 = yylhsminor.yy182; + yymsp[-4].minor.yy158 = yylhsminor.yy158; break; - case 147: /* columnlist ::= columnlist COMMA column */ -{taosArrayPush(yymsp[-2].minor.yy441, &yymsp[0].minor.yy343); yylhsminor.yy441 = yymsp[-2].minor.yy441; } - yymsp[-2].minor.yy441 = yylhsminor.yy441; + case 151: /* columnlist ::= columnlist COMMA column */ +{taosArrayPush(yymsp[-2].minor.yy525, &yymsp[0].minor.yy31); yylhsminor.yy525 = yymsp[-2].minor.yy525; } + yymsp[-2].minor.yy525 = yylhsminor.yy525; break; - case 148: /* columnlist ::= column */ -{yylhsminor.yy441 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy441, &yymsp[0].minor.yy343);} - yymsp[0].minor.yy441 = yylhsminor.yy441; + case 152: /* columnlist ::= column */ +{yylhsminor.yy525 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy525, &yymsp[0].minor.yy31);} + yymsp[0].minor.yy525 = yylhsminor.yy525; break; - case 149: /* column ::= ids typename */ + case 153: /* column ::= ids typename */ { - tSetColumnInfo(&yylhsminor.yy343, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy343); + tSetColumnInfo(&yylhsminor.yy31, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy31); } - yymsp[-1].minor.yy343 = 
yylhsminor.yy343; + yymsp[-1].minor.yy31 = yylhsminor.yy31; break; - case 156: /* tagitem ::= NULL */ + case 160: /* tagitem ::= NULL */ { yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy506, &yymsp[0].minor.yy0); } yymsp[0].minor.yy506 = yylhsminor.yy506; break; - case 157: /* tagitem ::= MINUS INTEGER */ - case 158: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==158); - case 159: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==159); - case 160: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==160); + case 161: /* tagitem ::= NOW */ +{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy506, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy506 = yylhsminor.yy506; + break; + case 162: /* tagitem ::= MINUS INTEGER */ + case 163: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==163); + case 164: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==164); + case 165: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==165); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; @@ -2677,179 +2723,179 @@ static void yy_reduce( } yymsp[-1].minor.yy506 = yylhsminor.yy506; break; - case 161: /* select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 166: /* select ::= SELECT selcollist from where_opt interval_opt session_option windowstate_option fill_opt sliding_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */ { - yylhsminor.yy236 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy441, yymsp[-11].minor.yy244, yymsp[-10].minor.yy166, yymsp[-4].minor.yy441, yymsp[-3].minor.yy441, &yymsp[-9].minor.yy340, &yymsp[-8].minor.yy259, &yymsp[-7].minor.yy348, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy441, &yymsp[0].minor.yy414, &yymsp[-1].minor.yy414, yymsp[-2].minor.yy166); + yylhsminor.yy464 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy525, yymsp[-11].minor.yy412, yymsp[-10].minor.yy370, yymsp[-4].minor.yy525, yymsp[-2].minor.yy525, &yymsp[-9].minor.yy520, &yymsp[-8].minor.yy259, &yymsp[-7].minor.yy144, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy525, &yymsp[0].minor.yy126, &yymsp[-1].minor.yy126, yymsp[-3].minor.yy370); } - yymsp[-13].minor.yy236 = yylhsminor.yy236; + yymsp[-13].minor.yy464 = yylhsminor.yy464; break; - case 162: /* select ::= LP select RP */ -{yymsp[-2].minor.yy236 = yymsp[-1].minor.yy236;} + case 167: /* select ::= LP select RP */ +{yymsp[-2].minor.yy464 = yymsp[-1].minor.yy464;} break; - case 163: /* union ::= select */ -{ yylhsminor.yy441 = setSubclause(NULL, yymsp[0].minor.yy236); } - yymsp[0].minor.yy441 = yylhsminor.yy441; + case 168: /* union ::= select */ +{ yylhsminor.yy525 = setSubclause(NULL, yymsp[0].minor.yy464); } + yymsp[0].minor.yy525 = yylhsminor.yy525; break; - case 164: /* union ::= union UNION ALL select */ -{ yylhsminor.yy441 = appendSelectClause(yymsp[-3].minor.yy441, yymsp[0].minor.yy236); } - yymsp[-3].minor.yy441 = yylhsminor.yy441; + case 169: /* union ::= union UNION ALL select */ +{ yylhsminor.yy525 = appendSelectClause(yymsp[-3].minor.yy525, yymsp[0].minor.yy464); } + yymsp[-3].minor.yy525 = yylhsminor.yy525; break; - case 165: /* cmd ::= union */ -{ setSqlInfo(pInfo, yymsp[0].minor.yy441, NULL, TSDB_SQL_SELECT); } + case 170: /* cmd ::= union */ +{ setSqlInfo(pInfo, yymsp[0].minor.yy525, NULL, TSDB_SQL_SELECT); } break; - case 166: /* select ::= SELECT selcollist */ + case 171: /* select ::= SELECT selcollist */ { - 
yylhsminor.yy236 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy441, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy464 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy525, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } - yymsp[-1].minor.yy236 = yylhsminor.yy236; + yymsp[-1].minor.yy464 = yylhsminor.yy464; break; - case 167: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy441 = yymsp[-1].minor.yy441;} - yymsp[-1].minor.yy441 = yylhsminor.yy441; + case 172: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy525 = yymsp[-1].minor.yy525;} + yymsp[-1].minor.yy525 = yylhsminor.yy525; break; - case 168: /* sclp ::= */ - case 198: /* orderby_opt ::= */ yytestcase(yyruleno==198); -{yymsp[1].minor.yy441 = 0;} + case 173: /* sclp ::= */ + case 203: /* orderby_opt ::= */ yytestcase(yyruleno==203); +{yymsp[1].minor.yy525 = 0;} break; - case 169: /* selcollist ::= sclp distinct expr as */ + case 174: /* selcollist ::= sclp distinct expr as */ { - yylhsminor.yy441 = tSqlExprListAppend(yymsp[-3].minor.yy441, yymsp[-1].minor.yy166, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy525 = tSqlExprListAppend(yymsp[-3].minor.yy525, yymsp[-1].minor.yy370, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } - yymsp[-3].minor.yy441 = yylhsminor.yy441; + yymsp[-3].minor.yy525 = yylhsminor.yy525; break; - case 170: /* selcollist ::= sclp STAR */ + case 175: /* selcollist ::= sclp STAR */ { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); - yylhsminor.yy441 = tSqlExprListAppend(yymsp[-1].minor.yy441, pNode, 0, 0); + yylhsminor.yy525 = tSqlExprListAppend(yymsp[-1].minor.yy525, pNode, 0, 0); } - yymsp[-1].minor.yy441 = yylhsminor.yy441; + yymsp[-1].minor.yy525 = yylhsminor.yy525; break; - case 171: /* as ::= AS ids */ + case 176: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 172: /* as ::= ids */ + case 177: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 173: /* as ::= */ + case 178: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 174: /* distinct ::= DISTINCT */ + case 179: /* distinct ::= DISTINCT */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 176: /* from ::= FROM tablelist */ - case 177: /* from ::= FROM sub */ yytestcase(yyruleno==177); -{yymsp[-1].minor.yy244 = yymsp[0].minor.yy244;} + case 181: /* from ::= FROM tablelist */ + case 182: /* from ::= FROM sub */ yytestcase(yyruleno==182); +{yymsp[-1].minor.yy412 = yymsp[0].minor.yy412;} break; - case 178: /* sub ::= LP union RP */ -{yymsp[-2].minor.yy244 = addSubqueryElem(NULL, yymsp[-1].minor.yy441, NULL);} + case 183: /* sub ::= LP union RP */ +{yymsp[-2].minor.yy412 = addSubqueryElem(NULL, yymsp[-1].minor.yy525, NULL);} break; - case 179: /* sub ::= LP union RP ids */ -{yymsp[-3].minor.yy244 = addSubqueryElem(NULL, yymsp[-2].minor.yy441, &yymsp[0].minor.yy0);} + case 184: /* sub ::= LP union RP ids */ +{yymsp[-3].minor.yy412 = addSubqueryElem(NULL, yymsp[-2].minor.yy525, &yymsp[0].minor.yy0);} break; - case 180: /* sub ::= sub COMMA LP union RP ids */ -{yylhsminor.yy244 = addSubqueryElem(yymsp[-5].minor.yy244, yymsp[-2].minor.yy441, &yymsp[0].minor.yy0);} - yymsp[-5].minor.yy244 = yylhsminor.yy244; + case 185: /* sub ::= sub COMMA LP union RP ids */ +{yylhsminor.yy412 = addSubqueryElem(yymsp[-5].minor.yy412, 
yymsp[-2].minor.yy525, &yymsp[0].minor.yy0);} + yymsp[-5].minor.yy412 = yylhsminor.yy412; break; - case 181: /* tablelist ::= ids cpxName */ + case 186: /* tablelist ::= ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy244 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy412 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } - yymsp[-1].minor.yy244 = yylhsminor.yy244; + yymsp[-1].minor.yy412 = yylhsminor.yy412; break; - case 182: /* tablelist ::= ids cpxName ids */ + case 187: /* tablelist ::= ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy244 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy412 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy244 = yylhsminor.yy244; + yymsp[-2].minor.yy412 = yylhsminor.yy412; break; - case 183: /* tablelist ::= tablelist COMMA ids cpxName */ + case 188: /* tablelist ::= tablelist COMMA ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy244 = setTableNameList(yymsp[-3].minor.yy244, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy412 = setTableNameList(yymsp[-3].minor.yy412, &yymsp[-1].minor.yy0, NULL); } - yymsp[-3].minor.yy244 = yylhsminor.yy244; + yymsp[-3].minor.yy412 = yylhsminor.yy412; break; - case 184: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 189: /* tablelist ::= tablelist COMMA ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy244 = setTableNameList(yymsp[-4].minor.yy244, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy412 = setTableNameList(yymsp[-4].minor.yy412, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-4].minor.yy244 = yylhsminor.yy244; + yymsp[-4].minor.yy412 = yylhsminor.yy412; break; - case 185: /* tmvar ::= VARIABLE */ + case 190: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 186: /* interval_opt ::= INTERVAL LP tmvar RP */ -{yymsp[-3].minor.yy340.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy340.offset.n = 0;} + case 191: /* interval_opt ::= INTERVAL LP tmvar RP */ +{yymsp[-3].minor.yy520.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy520.offset.n = 0;} break; - case 187: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -{yymsp[-5].minor.yy340.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy340.offset = yymsp[-1].minor.yy0;} + case 192: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ +{yymsp[-5].minor.yy520.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy520.offset = yymsp[-1].minor.yy0;} break; - case 188: /* interval_opt ::= */ -{memset(&yymsp[1].minor.yy340, 0, sizeof(yymsp[1].minor.yy340));} + case 193: /* interval_opt ::= */ +{memset(&yymsp[1].minor.yy520, 0, sizeof(yymsp[1].minor.yy520));} break; - case 189: /* session_option ::= */ + case 194: /* session_option ::= */ {yymsp[1].minor.yy259.col.n = 0; yymsp[1].minor.yy259.gap.n = 0;} break; - case 190: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + case 195: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; yymsp[-6].minor.yy259.col = yymsp[-4].minor.yy0; yymsp[-6].minor.yy259.gap = yymsp[-1].minor.yy0; } break; - case 191: /* windowstate_option ::= */ -{ yymsp[1].minor.yy348.col.n = 0; yymsp[1].minor.yy348.col.z = NULL;} + case 196: /* windowstate_option ::= */ +{ yymsp[1].minor.yy144.col.n = 0; yymsp[1].minor.yy144.col.z = NULL;} break; - case 
192: /* windowstate_option ::= STATE_WINDOW LP ids RP */ -{ yymsp[-3].minor.yy348.col = yymsp[-1].minor.yy0; } + case 197: /* windowstate_option ::= STATE_WINDOW LP ids RP */ +{ yymsp[-3].minor.yy144.col = yymsp[-1].minor.yy0; } break; - case 193: /* fill_opt ::= */ -{ yymsp[1].minor.yy441 = 0; } + case 198: /* fill_opt ::= */ +{ yymsp[1].minor.yy525 = 0; } break; - case 194: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 199: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy441, &A, -1, 0); - yymsp[-5].minor.yy441 = yymsp[-1].minor.yy441; + tVariantListInsert(yymsp[-1].minor.yy525, &A, -1, 0); + yymsp[-5].minor.yy525 = yymsp[-1].minor.yy525; } break; - case 195: /* fill_opt ::= FILL LP ID RP */ + case 200: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy441 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy525 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 196: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 201: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 197: /* sliding_opt ::= */ + case 202: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 199: /* orderby_opt ::= ORDER BY sortlist */ -{yymsp[-2].minor.yy441 = yymsp[0].minor.yy441;} + case 204: /* orderby_opt ::= ORDER BY sortlist */ +{yymsp[-2].minor.yy525 = yymsp[0].minor.yy525;} break; - case 200: /* sortlist ::= sortlist COMMA item sortorder */ + case 205: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy441 = tVariantListAppend(yymsp[-3].minor.yy441, &yymsp[-1].minor.yy506, yymsp[0].minor.yy112); + yylhsminor.yy525 = tVariantListAppend(yymsp[-3].minor.yy525, &yymsp[-1].minor.yy506, yymsp[0].minor.yy52); } - yymsp[-3].minor.yy441 = yylhsminor.yy441; + yymsp[-3].minor.yy525 = yylhsminor.yy525; break; - case 201: /* sortlist ::= item sortorder */ + case 206: /* sortlist ::= item sortorder */ { - yylhsminor.yy441 = tVariantListAppend(NULL, &yymsp[-1].minor.yy506, yymsp[0].minor.yy112); + yylhsminor.yy525 = tVariantListAppend(NULL, &yymsp[-1].minor.yy506, yymsp[0].minor.yy52); } - yymsp[-1].minor.yy441 = yylhsminor.yy441; + yymsp[-1].minor.yy525 = yylhsminor.yy525; break; - case 202: /* item ::= ids cpxName */ + case 207: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; @@ -2858,227 +2904,227 @@ static void yy_reduce( } yymsp[-1].minor.yy506 = yylhsminor.yy506; break; - case 203: /* sortorder ::= ASC */ -{ yymsp[0].minor.yy112 = TSDB_ORDER_ASC; } + case 208: /* sortorder ::= ASC */ +{ yymsp[0].minor.yy52 = TSDB_ORDER_ASC; } break; - case 204: /* sortorder ::= DESC */ -{ yymsp[0].minor.yy112 = TSDB_ORDER_DESC;} + case 209: /* sortorder ::= DESC */ +{ yymsp[0].minor.yy52 = TSDB_ORDER_DESC;} break; - case 205: /* sortorder ::= */ -{ yymsp[1].minor.yy112 = TSDB_ORDER_ASC; } + case 210: /* sortorder ::= */ +{ yymsp[1].minor.yy52 = TSDB_ORDER_ASC; } break; - case 206: /* groupby_opt ::= */ -{ yymsp[1].minor.yy441 = 0;} + case 211: /* groupby_opt ::= */ +{ yymsp[1].minor.yy525 = 0;} break; - case 207: /* groupby_opt ::= GROUP BY grouplist */ -{ yymsp[-2].minor.yy441 = yymsp[0].minor.yy441;} + case 212: /* groupby_opt ::= GROUP BY grouplist */ +{ yymsp[-2].minor.yy525 = 
yymsp[0].minor.yy525;} break; - case 208: /* grouplist ::= grouplist COMMA item */ + case 213: /* grouplist ::= grouplist COMMA item */ { - yylhsminor.yy441 = tVariantListAppend(yymsp[-2].minor.yy441, &yymsp[0].minor.yy506, -1); + yylhsminor.yy525 = tVariantListAppend(yymsp[-2].minor.yy525, &yymsp[0].minor.yy506, -1); } - yymsp[-2].minor.yy441 = yylhsminor.yy441; + yymsp[-2].minor.yy525 = yylhsminor.yy525; break; - case 209: /* grouplist ::= item */ + case 214: /* grouplist ::= item */ { - yylhsminor.yy441 = tVariantListAppend(NULL, &yymsp[0].minor.yy506, -1); + yylhsminor.yy525 = tVariantListAppend(NULL, &yymsp[0].minor.yy506, -1); } - yymsp[0].minor.yy441 = yylhsminor.yy441; + yymsp[0].minor.yy525 = yylhsminor.yy525; break; - case 210: /* having_opt ::= */ - case 220: /* where_opt ::= */ yytestcase(yyruleno==220); - case 262: /* expritem ::= */ yytestcase(yyruleno==262); -{yymsp[1].minor.yy166 = 0;} + case 215: /* having_opt ::= */ + case 225: /* where_opt ::= */ yytestcase(yyruleno==225); + case 267: /* expritem ::= */ yytestcase(yyruleno==267); +{yymsp[1].minor.yy370 = 0;} break; - case 211: /* having_opt ::= HAVING expr */ - case 221: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==221); -{yymsp[-1].minor.yy166 = yymsp[0].minor.yy166;} + case 216: /* having_opt ::= HAVING expr */ + case 226: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==226); +{yymsp[-1].minor.yy370 = yymsp[0].minor.yy370;} break; - case 212: /* limit_opt ::= */ - case 216: /* slimit_opt ::= */ yytestcase(yyruleno==216); -{yymsp[1].minor.yy414.limit = -1; yymsp[1].minor.yy414.offset = 0;} + case 217: /* limit_opt ::= */ + case 221: /* slimit_opt ::= */ yytestcase(yyruleno==221); +{yymsp[1].minor.yy126.limit = -1; yymsp[1].minor.yy126.offset = 0;} break; - case 213: /* limit_opt ::= LIMIT signed */ - case 217: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==217); -{yymsp[-1].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-1].minor.yy414.offset = 0;} + case 218: /* limit_opt ::= LIMIT signed */ + case 222: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==222); +{yymsp[-1].minor.yy126.limit = yymsp[0].minor.yy501; yymsp[-1].minor.yy126.offset = 0;} break; - case 214: /* limit_opt ::= LIMIT signed OFFSET signed */ -{ yymsp[-3].minor.yy414.limit = yymsp[-2].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[0].minor.yy369;} + case 219: /* limit_opt ::= LIMIT signed OFFSET signed */ +{ yymsp[-3].minor.yy126.limit = yymsp[-2].minor.yy501; yymsp[-3].minor.yy126.offset = yymsp[0].minor.yy501;} break; - case 215: /* limit_opt ::= LIMIT signed COMMA signed */ -{ yymsp[-3].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[-2].minor.yy369;} + case 220: /* limit_opt ::= LIMIT signed COMMA signed */ +{ yymsp[-3].minor.yy126.limit = yymsp[0].minor.yy501; yymsp[-3].minor.yy126.offset = yymsp[-2].minor.yy501;} break; - case 218: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -{yymsp[-3].minor.yy414.limit = yymsp[-2].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[0].minor.yy369;} + case 223: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +{yymsp[-3].minor.yy126.limit = yymsp[-2].minor.yy501; yymsp[-3].minor.yy126.offset = yymsp[0].minor.yy501;} break; - case 219: /* slimit_opt ::= SLIMIT signed COMMA signed */ -{yymsp[-3].minor.yy414.limit = yymsp[0].minor.yy369; yymsp[-3].minor.yy414.offset = yymsp[-2].minor.yy369;} + case 224: /* slimit_opt ::= SLIMIT signed COMMA signed */ +{yymsp[-3].minor.yy126.limit = yymsp[0].minor.yy501; yymsp[-3].minor.yy126.offset = 
yymsp[-2].minor.yy501;} break; - case 222: /* expr ::= LP expr RP */ -{yylhsminor.yy166 = yymsp[-1].minor.yy166; yylhsminor.yy166->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy166->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 227: /* expr ::= LP expr RP */ +{yylhsminor.yy370 = yymsp[-1].minor.yy370; yylhsminor.yy370->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy370->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 223: /* expr ::= ID */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 228: /* expr ::= ID */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 224: /* expr ::= ID DOT ID */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 229: /* expr ::= ID DOT ID */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 225: /* expr ::= ID DOT STAR */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 230: /* expr ::= ID DOT STAR */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 226: /* expr ::= INTEGER */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 231: /* expr ::= INTEGER */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 227: /* expr ::= MINUS INTEGER */ - case 228: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==228); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy166 = yylhsminor.yy166; + case 232: /* expr ::= MINUS INTEGER */ + case 233: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==233); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy370 = yylhsminor.yy370; break; - case 229: /* expr ::= FLOAT */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 234: /* expr ::= FLOAT */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 230: /* expr ::= MINUS FLOAT */ - case 231: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==231); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy166 = yylhsminor.yy166; + case 235: /* expr ::= MINUS FLOAT */ + case 236: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==236); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, 
TK_FLOAT);} + yymsp[-1].minor.yy370 = yylhsminor.yy370; break; - case 232: /* expr ::= STRING */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 237: /* expr ::= STRING */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 233: /* expr ::= NOW */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 238: /* expr ::= NOW */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 234: /* expr ::= VARIABLE */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 239: /* expr ::= VARIABLE */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 235: /* expr ::= PLUS VARIABLE */ - case 236: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==236); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} - yymsp[-1].minor.yy166 = yylhsminor.yy166; + case 240: /* expr ::= PLUS VARIABLE */ + case 241: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==241); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);} + yymsp[-1].minor.yy370 = yylhsminor.yy370; break; - case 237: /* expr ::= BOOL */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 242: /* expr ::= BOOL */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 238: /* expr ::= NULL */ -{ yylhsminor.yy166 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 243: /* expr ::= NULL */ +{ yylhsminor.yy370 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 239: /* expr ::= ID LP exprlist RP */ -{ yylhsminor.yy166 = tSqlExprCreateFunction(yymsp[-1].minor.yy441, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy166 = yylhsminor.yy166; + case 244: /* expr ::= ID LP exprlist RP */ +{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy370 = tSqlExprCreateFunction(yymsp[-1].minor.yy525, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy370 = yylhsminor.yy370; break; - case 240: /* expr ::= ID LP STAR RP */ -{ yylhsminor.yy166 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy166 = yylhsminor.yy166; + case 245: /* expr ::= ID LP STAR RP */ +{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy370 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy370 = yylhsminor.yy370; break; - case 241: /* expr ::= expr IS NULL */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, NULL, TK_ISNULL);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 246: /* expr ::= expr IS NULL */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, NULL, TK_ISNULL);} 
+ yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 242: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-3].minor.yy166, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy166 = yylhsminor.yy166; + case 247: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-3].minor.yy370, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy370 = yylhsminor.yy370; break; - case 243: /* expr ::= expr LT expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_LT);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 248: /* expr ::= expr LT expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_LT);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 244: /* expr ::= expr GT expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_GT);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 249: /* expr ::= expr GT expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_GT);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 245: /* expr ::= expr LE expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_LE);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 250: /* expr ::= expr LE expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_LE);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 246: /* expr ::= expr GE expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_GE);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 251: /* expr ::= expr GE expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_GE);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 247: /* expr ::= expr NE expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_NE);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 252: /* expr ::= expr NE expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_NE);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 248: /* expr ::= expr EQ expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_EQ);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 253: /* expr ::= expr EQ expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_EQ);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 249: /* expr ::= expr BETWEEN expr AND expr */ -{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy166); yylhsminor.yy166 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy166, yymsp[-2].minor.yy166, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy166, TK_LE), TK_AND);} - yymsp[-4].minor.yy166 = yylhsminor.yy166; + case 254: /* expr ::= expr BETWEEN expr AND expr */ +{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy370); yylhsminor.yy370 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy370, yymsp[-2].minor.yy370, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy370, TK_LE), TK_AND);} + yymsp[-4].minor.yy370 = yylhsminor.yy370; break; - case 250: /* expr ::= expr AND expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_AND);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 255: /* expr ::= expr AND expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_AND);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 251: /* expr ::= expr OR 
expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_OR); } - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 256: /* expr ::= expr OR expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_OR); } + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 252: /* expr ::= expr PLUS expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_PLUS); } - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 257: /* expr ::= expr PLUS expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_PLUS); } + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 253: /* expr ::= expr MINUS expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_MINUS); } - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 258: /* expr ::= expr MINUS expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_MINUS); } + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 254: /* expr ::= expr STAR expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_STAR); } - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 259: /* expr ::= expr STAR expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_STAR); } + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 255: /* expr ::= expr SLASH expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_DIVIDE);} - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 260: /* expr ::= expr SLASH expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_DIVIDE);} + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 256: /* expr ::= expr REM expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_REM); } - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 261: /* expr ::= expr REM expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_REM); } + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 257: /* expr ::= expr LIKE expr */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-2].minor.yy166, yymsp[0].minor.yy166, TK_LIKE); } - yymsp[-2].minor.yy166 = yylhsminor.yy166; + case 262: /* expr ::= expr LIKE expr */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-2].minor.yy370, yymsp[0].minor.yy370, TK_LIKE); } + yymsp[-2].minor.yy370 = yylhsminor.yy370; break; - case 258: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy166 = tSqlExprCreate(yymsp[-4].minor.yy166, (tSqlExpr*)yymsp[-1].minor.yy441, TK_IN); } - yymsp[-4].minor.yy166 = yylhsminor.yy166; + case 263: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy370 = tSqlExprCreate(yymsp[-4].minor.yy370, (tSqlExpr*)yymsp[-1].minor.yy525, TK_IN); } + yymsp[-4].minor.yy370 = yylhsminor.yy370; break; - case 259: /* exprlist ::= exprlist COMMA expritem */ -{yylhsminor.yy441 = tSqlExprListAppend(yymsp[-2].minor.yy441,yymsp[0].minor.yy166,0, 0);} - yymsp[-2].minor.yy441 = yylhsminor.yy441; + case 264: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy525 = tSqlExprListAppend(yymsp[-2].minor.yy525,yymsp[0].minor.yy370,0, 0);} + yymsp[-2].minor.yy525 = yylhsminor.yy525; break; - case 260: /* exprlist ::= expritem */ -{yylhsminor.yy441 = tSqlExprListAppend(0,yymsp[0].minor.yy166,0, 0);} - yymsp[0].minor.yy441 = yylhsminor.yy441; + case 265: /* exprlist ::= expritem */ +{yylhsminor.yy525 = 
tSqlExprListAppend(0,yymsp[0].minor.yy370,0, 0);} + yymsp[0].minor.yy525 = yylhsminor.yy525; break; - case 261: /* expritem ::= expr */ -{yylhsminor.yy166 = yymsp[0].minor.yy166;} - yymsp[0].minor.yy166 = yylhsminor.yy166; + case 266: /* expritem ::= expr */ +{yylhsminor.yy370 = yymsp[0].minor.yy370;} + yymsp[0].minor.yy370 = yylhsminor.yy370; break; - case 263: /* cmd ::= RESET QUERY CACHE */ + case 268: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 264: /* cmd ::= SYNCDB ids REPLICA */ + case 269: /* cmd ::= SYNCDB ids REPLICA */ { setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} break; - case 265: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 270: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 266: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 271: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3089,21 +3135,21 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 267: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ + case 272: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 268: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 273: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 269: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 274: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3114,7 +3160,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 270: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 275: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3128,7 +3174,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 271: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 276: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3140,21 +3186,21 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 272: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG 
columnlist */ + case 277: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 273: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 278: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 274: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 279: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3165,21 +3211,21 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 275: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ + case 280: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 276: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 281: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 277: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 282: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3190,7 +3236,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 278: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 283: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3204,7 +3250,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 279: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ + case 284: /* cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; @@ -3216,20 +3262,20 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 280: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */ + case 285: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist 
*/ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy441, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy525, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 281: /* cmd ::= KILL CONNECTION INTEGER */ + case 286: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 282: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 287: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 283: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 288: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/query/tests/CMakeLists.txt b/src/query/tests/CMakeLists.txt index 0cfe2ff1659027561799145e8ef8ed514988416c..cc4b607bb44f8d85ab61a5c0b035e5f1fc976161 100644 --- a/src/query/tests/CMakeLists.txt +++ b/src/query/tests/CMakeLists.txt @@ -1,10 +1,11 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) -FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) +FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) +FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) -IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) +IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build unit test") # GoogleTest requires at least C++11 diff --git a/src/query/tests/astTest.cpp b/src/query/tests/astTest.cpp index ce7b2f94a177576c8046b299bcb2e695fb5ead2d..1143d00e8da9e77cd0f740d98fe77ffd1beac4bc 100644 --- a/src/query/tests/astTest.cpp +++ b/src/query/tests/astTest.cpp @@ -10,6 +10,7 @@ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" typedef struct ResultObj { int32_t numOfResult; diff --git a/src/query/tests/histogramTest.cpp b/src/query/tests/histogramTest.cpp index 3088d6f8078483c39ec3780250e8bfa2d613f218..0266ecffc11348dcd0184030584ed7b721d39aff 100644 --- a/src/query/tests/histogramTest.cpp +++ b/src/query/tests/histogramTest.cpp @@ -5,6 +5,10 @@ #include "taos.h" #include "qHistogram.h" + +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" + namespace { void doHistogramAddTest() { SHistogramInfo* pHisto = NULL; diff --git a/src/query/tests/patternMatchTest.cpp b/src/query/tests/patternMatchTest.cpp index f3e0d3e119259d0a7cc8a94a6b43f4c71558bf78..091604c65c0e8b7fcf998fdd69f6f82f101f8157 100644 --- a/src/query/tests/patternMatchTest.cpp +++ b/src/query/tests/patternMatchTest.cpp @@ -6,6 +6,9 @@ #include "qAggMain.h" #include "tcompare.h" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" + TEST(testCase, patternMatchTest) { SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; diff --git 
a/src/query/tests/percentileTest.cpp b/src/query/tests/percentileTest.cpp index 104bfb3c06a9613bafcd4e0b3f39af4f9d102b04..1b6951201af5908378fb253b38cea01de1210d57 100644 --- a/src/query/tests/percentileTest.cpp +++ b/src/query/tests/percentileTest.cpp @@ -7,6 +7,9 @@ #include "qPercentile.h" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" + namespace { tMemBucket *createBigIntDataBucket(int32_t start, int32_t end) { tMemBucket *pBucket = tMemBucketCreate(sizeof(int64_t), TSDB_DATA_TYPE_BIGINT, start, end); diff --git a/src/query/tests/resultBufferTest.cpp b/src/query/tests/resultBufferTest.cpp index 491d75ccb9c8104a8e7760aa15918bd212646de6..54ac0bf4e5c78f2fcc7f0e3271eb3409ea072db7 100644 --- a/src/query/tests/resultBufferTest.cpp +++ b/src/query/tests/resultBufferTest.cpp @@ -6,6 +6,9 @@ #include "taos.h" #include "tsdb.h" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" + namespace { // simple test void simpleTest() { diff --git a/src/query/tests/tsBufTest.cpp b/src/query/tests/tsBufTest.cpp index dd7f03a494ac37229b117c678fc461d455067850..04c5a152520d08329408253af271c4d43c5c0fe3 100644 --- a/src/query/tests/tsBufTest.cpp +++ b/src/query/tests/tsBufTest.cpp @@ -9,6 +9,10 @@ #include "ttoken.h" #include "tutil.h" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" + namespace { /** * diff --git a/src/query/tests/unitTest.cpp b/src/query/tests/unitTest.cpp index 33ba8200d3afb9cff00f150ab5bef799f3fa1e86..e5487a061d8c2b79fbd7d321c256443c3ddab97b 100644 --- a/src/query/tests/unitTest.cpp +++ b/src/query/tests/unitTest.cpp @@ -6,14 +6,17 @@ #include "taos.h" #include "tsdb.h" +#pragma GCC diagnostic ignored "-Wwrite-strings" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic ignored "-Wunused-variable" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#pragma GCC diagnostic ignored "-Wsign-compare" + #include "../../client/inc/tscUtil.h" #include "tutil.h" #include "tvariant.h" #include "ttokendef.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wwrite-strings" - namespace { int32_t testValidateName(char* name) { SStrToken token = {0}; diff --git a/src/rpc/CMakeLists.txt b/src/rpc/CMakeLists.txt index f94b4aeb6d21277b6b845587cd35a2c98e0bc0b0..14b77356baa4b87a201e6ff10e785db99cbd47a6 100644 --- a/src/rpc/CMakeLists.txt +++ b/src/rpc/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 605f7d2a326f9b2a66865d9ad0edc2987eb57f81..e958a8e5ec5b6542d609028ee052d21a9a84d397 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -1189,7 +1189,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead, SRpcReqConte } rpcSendReqToServer(pRpc, pContext); rpcFreeCont(rpcMsg.pCont); - } else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY) { + } else if (pHead->code == TSDB_CODE_RPC_NOT_READY || pHead->code == TSDB_CODE_APP_NOT_READY || pHead->code == TSDB_CODE_DND_EXITING) { pContext->code = pHead->code; rpcProcessConnError(pContext, NULL); rpcFreeCont(rpcMsg.pCont); diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 029629eff0e10f5bc8812f3df8998b24a507931d..e9feeef9d339a5b1a96e41fd35fb6c62e13ed94b 
100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -242,6 +242,7 @@ static void *taosAcceptTcpConnection(void *arg) { pServerObj = (SServerObj *)arg; tDebug("%s TCP server is ready, ip:0x%x:%hu", pServerObj->label, pServerObj->ip, pServerObj->port); + setThreadName("acceptTcpConn"); while (1) { socklen_t addrlen = sizeof(caddr); @@ -528,6 +529,11 @@ static void *taosProcessTcpData(void *param) { SFdObj *pFdObj; struct epoll_event events[maxEvents]; SRecvInfo recvInfo; + char name[16]; + + memset(name, 0, sizeof(name)); + snprintf(name, 16, "%s-tcpData", pThreadObj->label); + setThreadName(name); while (1) { int fdNum = epoll_wait(pThreadObj->pollFd, events, maxEvents, TAOS_EPOLL_WAIT_TIME); diff --git a/src/rpc/src/rpcUdp.c b/src/rpc/src/rpcUdp.c index 7a46dbe5c3e1238da93043f8dc97047f84ff72e8..086a390cb8ea2a95f576cb1bff81dfc79769863a 100644 --- a/src/rpc/src/rpcUdp.c +++ b/src/rpc/src/rpcUdp.c @@ -195,6 +195,8 @@ static void *taosRecvUdpData(void *param) { tDebug("%s UDP thread is created, index:%d", pConn->label, pConn->index); char *msg = pConn->buffer; + setThreadName("recvUdpData"); + while (1) { dataLen = recvfrom(pConn->fd, pConn->buffer, RPC_MAX_UDP_SIZE, 0, (struct sockaddr *)&sourceAdd, &addLen); if (dataLen <= 0) { diff --git a/src/rpc/test/CMakeLists.txt b/src/rpc/test/CMakeLists.txt index c10cea6c9dd8c53ab8608c8a736795f2318059d8..a32ac9943d08fe00427ec58520809b4f04657315 100644 --- a/src/rpc/test/CMakeLists.txt +++ b/src/rpc/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index faa6d40da398045e43dba3bbdbaf9ef6c7ccb1ff..de30114bd1c7fa9687a6d75bca3d7158137e29e4 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -47,6 +47,8 @@ static int tcount = 0; static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; + + setThreadName("sendCliReq"); tDebug("thread:%d, start to send request", pInfo->index); diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index a152d8e4a51502357530267acb924c74426c9509..3e94a56efb3494ac5fe1942245abd0bad8815ee7 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -39,8 +39,10 @@ static int terror = 0; static void *sendRequest(void *param) { SInfo *pInfo = (SInfo *)param; - SRpcMsg rpcMsg, rspMsg; + SRpcMsg rpcMsg, rspMsg; + setThreadName("sendSrvReq"); + tDebug("thread:%d, start to send request", pInfo->index); while ( pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt index 521f51ceb71245f96855d71e649e697eb7591df4..2cd84c7c3fff63a702d99d8b2dc45303f17528ef 100644 --- a/src/sync/CMakeLists.txt +++ b/src/sync/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index ec6dfcbc823426d4c535b540188e9e53a656b14b..411e706fb11a7052f5ed3ad162bd3201e212e552 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -132,6 +132,7 @@ void * syncRestoreData(void *param); int32_t syncSaveIntoBuffer(SSyncPeer *pPeer, SWalHead *pHead); void syncRestartConnection(SSyncPeer *pPeer); void syncBroadcastStatus(SSyncNode *pNode); +uint32_t syncResolvePeerFqdn(SSyncPeer *pPeer); SSyncPeer *syncAcquirePeer(int64_t rid); void syncReleasePeer(SSyncPeer 
*pPeer); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index d34536543c32dd8129375cc93902648e9566239a..e9b24315d095c3a379f9702079f7c93be5149282 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -559,7 +559,8 @@ static void syncClosePeerConn(SSyncPeer *pPeer) { static void syncRemovePeer(SSyncPeer *pPeer) { sInfo("%s, it is removed", pPeer->id); - pPeer->ip = 0; + //pPeer->ip = 0; + pPeer->fqdn[0] = '\0'; syncClosePeerConn(pPeer); //taosRemoveRef(tsPeerRefId, pPeer->rid); syncReleasePeer(pPeer); @@ -585,20 +586,31 @@ static void syncStopCheckPeerConn(SSyncPeer *pPeer) { sDebug("%s, stop check peer connection", pPeer->id); } +uint32_t syncResolvePeerFqdn(SSyncPeer *pPeer) { + uint32_t ip = taosGetIpv4FromFqdn(pPeer->fqdn); + if (ip == 0xFFFFFFFF) { + sError("failed to resolve peer fqdn:%s since %s", pPeer->fqdn, strerror(errno)); + terrno = TSDB_CODE_RPC_FQDN_ERROR; + return 0; + } + + return ip; +} + static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) { - uint32_t ip = taosGetIpv4FromFqdn(pInfo->nodeFqdn); + /*uint32_t ip = taosGetIpv4FromFqdn(pInfo->nodeFqdn); if (ip == 0xFFFFFFFF) { sError("failed to add peer, can resolve fqdn:%s since %s", pInfo->nodeFqdn, strerror(errno)); terrno = TSDB_CODE_RPC_FQDN_ERROR; return NULL; } - + */ SSyncPeer *pPeer = calloc(1, sizeof(SSyncPeer)); if (pPeer == NULL) return NULL; pPeer->nodeId = pInfo->nodeId; tstrncpy(pPeer->fqdn, pInfo->nodeFqdn, sizeof(pPeer->fqdn)); - pPeer->ip = ip; + //pPeer->ip = ip; pPeer->port = pInfo->nodePort; pPeer->fqdn[sizeof(pPeer->fqdn) - 1] = 0; snprintf(pPeer->id, sizeof(pPeer->id), "vgId:%d, nodeId:%d", pNode->vgId, pPeer->nodeId); @@ -857,14 +869,14 @@ static void syncRestartPeer(SSyncPeer *pPeer) { sDebug("%s, peer conn is restart and set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn); - if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) { + if (pPeer->nodeId == 0 || ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) { sDebug("%s, check peer connection in 1000 ms", pPeer->id); taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer); } } void syncRestartConnection(SSyncPeer *pPeer) { - if (pPeer->ip == 0) return; + if (pPeer->fqdn[0] == '\0') return; if (syncAcquirePeer(pPeer->rid) == NULL) return; @@ -878,7 +890,7 @@ static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; sInfo("%s, sync-req is received", pPeer->id); - if (pPeer->ip == 0) return; + if (pPeer->fqdn[0] == '\0') return; if (nodeRole != TAOS_SYNC_ROLE_MASTER) { sError("%s, I am not master anymore", pPeer->id); @@ -1090,7 +1102,7 @@ static int32_t syncProcessPeerMsg(int64_t rid, void *buffer) { } static int32_t syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId) { - if (pPeer->peerFd < 0 || pPeer->ip == 0) { + if (pPeer->peerFd < 0 || pPeer->fqdn[0] == '\0') { sDebug("%s, failed to send status msg, restart fd:%d", pPeer->id, pPeer->peerFd); syncRestartConnection(pPeer); return -1; @@ -1135,7 +1147,13 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { return; } - SOCKET connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); + uint32_t ip = syncResolvePeerFqdn(pPeer); + if (!ip) { + taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer); + return; + } + + SOCKET connFd = taosOpenTcpClientSocket(ip, pPeer->port, 0); if (connFd <= 0) { sDebug("%s, 
failed to open tcp socket since %s", pPeer->id, strerror(errno)); taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, (void *)pPeer->rid, tsSyncTmrCtrl, &pPeer->timer); diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index c0d66316cd5b802ddcaddf1015d4ceca1aa3b2c5..bf9d5201a062e048365c2887b7b6a6d70b40547f 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -263,6 +263,7 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { } void *syncRestoreData(void *param) { + setThreadName("syncRestoreData"); int64_t rid = (int64_t)param; SSyncPeer *pPeer = syncAcquirePeer(rid); if (pPeer == NULL) { diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 505ba68c41f2c2373dba3322b6ed31ba0ac853f1..89fdda0686ffc6d7d5b372def92e48c6cf06c2ab 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -415,6 +415,7 @@ static int32_t syncRetrieveDataStepByStep(SSyncPeer *pPeer) { } void *syncRetrieveData(void *param) { + setThreadName("syncRetrievData"); int64_t rid = (int64_t)param; SSyncPeer *pPeer = syncAcquirePeer(rid); if (pPeer == NULL) { @@ -422,6 +423,12 @@ void *syncRetrieveData(void *param) { return NULL; } + uint32_t ip = syncResolvePeerFqdn(pPeer); + if (!ip) { + syncReleasePeer(pPeer); + return NULL; + } + SSyncNode *pNode = pPeer->pSyncNode; taosBlockSIGPIPE(); @@ -430,7 +437,7 @@ void *syncRetrieveData(void *param) { if (pNode->notifyFlowCtrlFp) (*pNode->notifyFlowCtrlFp)(pNode->vgId, pPeer->numOfRetrieves); - pPeer->syncFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); + pPeer->syncFd = taosOpenTcpClientSocket(ip, pPeer->port, 0); if (pPeer->syncFd < 0) { sError("%s, failed to open socket to sync", pPeer->id); } else { diff --git a/src/sync/src/syncTcp.c b/src/sync/src/syncTcp.c index 3ad9e9bba01eb861c48fa121b154af3e7838dda0..698245f9e408281e2a7c41da2d6228a1ec12217d 100644 --- a/src/sync/src/syncTcp.c +++ b/src/sync/src/syncTcp.c @@ -195,6 +195,8 @@ static void *syncProcessTcpData(void *param) { SConnObj * pConn = NULL; struct epoll_event events[maxEvents]; + setThreadName("syncTcpData"); + void *buffer = malloc(pInfo->bufferSize); taosBlockSIGPIPE(); @@ -257,6 +259,7 @@ static void *syncAcceptPeerTcpConnection(void *argv) { SPoolInfo *pInfo = &pPool->info; taosBlockSIGPIPE(); + setThreadName("acceptTcpConn"); while (1) { struct sockaddr_in clientAddr; diff --git a/src/sync/test/CMakeLists.txt b/src/sync/test/CMakeLists.txt index f2b05ab2263c0d80bc870981f86933922de639e4..a5ab8191371ce97ecbaf9ef4dc8dbace6a6c4802 100644 --- a/src/sync/test/CMakeLists.txt +++ b/src/sync/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/sync/test/syncClient.c b/src/sync/test/syncClient.c index 23ea54ee0c19b6ad2f93d7577d8d711874b10968..303d2376effffa3a3b2dc01580352a43aeaac9d8 100644 --- a/src/sync/test/syncClient.c +++ b/src/sync/test/syncClient.c @@ -48,6 +48,8 @@ void *sendRequest(void *param) { SInfo * pInfo = (SInfo *)param; SRpcMsg rpcMsg = {0}; + setThreadName("sendCliReq"); + uDebug("thread:%d, start to send request", pInfo->index); while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) { diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index eeaa6a08c2e47d103b62d6023dde74341585f65f..a3d06966488b95a561c9bc688b49f7ceceb87248 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -178,6 +178,8 @@ void *processWriteQueue(void *param) { int type; 
void *item; + setThreadName("writeQ"); + while (1) { int ret = taosReadQitem(qhandle, &type, &item); if (ret <= 0) { diff --git a/src/tfs/CMakeLists.txt b/src/tfs/CMakeLists.txt index b435c84366fb47bd137b1c13bc98eab625bbcc66..7f956f07a21ed52363fc2072b01ad0853621712b 100644 --- a/src/tfs/CMakeLists.txt +++ b/src/tfs/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/tfs/src/tfs.c b/src/tfs/src/tfs.c index f78535b8ed5bd17cb232fbcbc73f9831db099547..9dc68dcdfdb73fec46bebd4d9b27e361cfc2d570 100644 --- a/src/tfs/src/tfs.c +++ b/src/tfs/src/tfs.c @@ -480,11 +480,13 @@ static int tfsFormatDir(char *idir, char *odir) { return -1; } - if (realpath(wep.we_wordv[0], odir) == NULL) { + char tmp[PATH_MAX] = {0}; + if (realpath(wep.we_wordv[0], tmp) == NULL) { terrno = TAOS_SYSTEM_ERROR(errno); wordfree(&wep); return -1; } + strcpy(odir, tmp); wordfree(&wep); return 0; diff --git a/src/tsdb/CMakeLists.txt b/src/tsdb/CMakeLists.txt index 8080a61a6c9b10b78d965a953765250e0a157fb6..c5b77df5a25f9f0b1e9294228520f171b9befddd 100644 --- a/src/tsdb/CMakeLists.txt +++ b/src/tsdb/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(inc) diff --git a/src/tsdb/inc/tsdbFile.h b/src/tsdb/inc/tsdbFile.h index a2be99e279c7a80baf7c24f460f71899df6b2a9a..3913a97c3c4d5c606999341e4f7e573c8fc67d63 100644 --- a/src/tsdb/inc/tsdbFile.h +++ b/src/tsdb/inc/tsdbFile.h @@ -350,8 +350,8 @@ static FORCE_INLINE int tsdbCopyDFileSet(SDFileSet* pSrc, SDFileSet* pDest) { } static FORCE_INLINE void tsdbGetFidKeyRange(int days, int8_t precision, int fid, TSKEY* minKey, TSKEY* maxKey) { - *minKey = fid * days * tsMsPerDay[precision]; - *maxKey = *minKey + days * tsMsPerDay[precision] - 1; + *minKey = fid * days * tsTickPerDay[precision]; + *maxKey = *minKey + days * tsTickPerDay[precision] - 1; } static FORCE_INLINE bool tsdbFSetIsOk(SDFileSet* pSet) { diff --git a/src/tsdb/inc/tsdbMemTable.h b/src/tsdb/inc/tsdbMemTable.h index babb7024b2500f3b8418fa5404bb593c768d8f1f..67e9976c704e15ae33174ff50ffdf1a289b5182a 100644 --- a/src/tsdb/inc/tsdbMemTable.h +++ b/src/tsdb/inc/tsdbMemTable.h @@ -71,27 +71,27 @@ int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxK TKEY* filterKeys, int nFilterKeys, bool keepDup, SMergeInfo* pMergeInfo); void* tsdbCommitData(STsdbRepo* pRepo); -static FORCE_INLINE SDataRow tsdbNextIterRow(SSkipListIterator* pIter) { +static FORCE_INLINE SMemRow tsdbNextIterRow(SSkipListIterator* pIter) { if (pIter == NULL) return NULL; SSkipListNode* node = tSkipListIterGet(pIter); if (node == NULL) return NULL; - return (SDataRow)SL_GET_NODE_DATA(node); + return (SMemRow)SL_GET_NODE_DATA(node); } static FORCE_INLINE TSKEY tsdbNextIterKey(SSkipListIterator* pIter) { - SDataRow row = tsdbNextIterRow(pIter); + SMemRow row = tsdbNextIterRow(pIter); if (row == NULL) return TSDB_DATA_TIMESTAMP_NULL; - return dataRowKey(row); + return memRowKey(row); } static FORCE_INLINE TKEY tsdbNextIterTKey(SSkipListIterator* pIter) { - SDataRow row = tsdbNextIterRow(pIter); + SMemRow row = tsdbNextIterRow(pIter); if (row == NULL) return TKEY_NULL; - return dataRowTKey(row); + return memRowTKey(row); } #endif /* _TD_TSDB_MEMTABLE_H_ */ \ No newline at end of file diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h index 45bbd5a7c6911fed4ea7309a77d3ac144109a34b..9a8de01f71331d091e75fedac61cc333dca165cc 
100644 --- a/src/tsdb/inc/tsdbMeta.h +++ b/src/tsdb/inc/tsdbMeta.h @@ -32,7 +32,7 @@ typedef struct STable { void* eventHandler; // TODO void* streamHandler; // TODO TSKEY lastKey; - SDataRow lastRow; + SMemRow lastRow; char* sql; void* cqhandle; SRWLatch latch; // TODO: implementa latch functions @@ -148,7 +148,7 @@ static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) { } static FORCE_INLINE TSKEY tsdbGetTableLastKeyImpl(STable* pTable) { - ASSERT(pTable->lastRow == NULL || pTable->lastKey == dataRowKey(pTable->lastRow)); + ASSERT((pTable->lastRow == NULL) || (pTable->lastKey == memRowKey(pTable->lastRow))); return pTable->lastKey; } diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 289bb518e993a79948b867d38841f8cd95fc21fb..4d08562a465a589567ce94f605645aabc51c663e 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -17,9 +17,9 @@ #define TSDB_MAX_SUBBLOCKS 8 static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) { if (key < 0) { - return (int)((key + 1) / tsMsPerDay[precision] / days - 1); + return (int)((key + 1) / tsTickPerDay[precision] / days - 1); } else { - return (int)((key / tsMsPerDay[precision] / days)); + return (int)((key / tsTickPerDay[precision] / days)); } } @@ -368,9 +368,9 @@ void tsdbGetRtnSnap(STsdbRepo *pRepo, SRtn *pRtn) { TSKEY minKey, midKey, maxKey, now; now = taosGetTimestamp(pCfg->precision); - minKey = now - pCfg->keep * tsMsPerDay[pCfg->precision]; - midKey = now - pCfg->keep2 * tsMsPerDay[pCfg->precision]; - maxKey = now - pCfg->keep1 * tsMsPerDay[pCfg->precision]; + minKey = now - pCfg->keep * tsTickPerDay[pCfg->precision]; + midKey = now - pCfg->keep2 * tsTickPerDay[pCfg->precision]; + maxKey = now - pCfg->keep1 * tsTickPerDay[pCfg->precision]; pRtn->minKey = minKey; pRtn->minFid = (int)(TSDB_KEY_FID(minKey, pCfg->daysPerFile, pCfg->precision)); @@ -1038,7 +1038,8 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDataCo SDataCol * pDataCol = pDataCols->cols + ncol; SBlockCol *pBlockCol = pBlockData->cols + nColsNotAllNull; - if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it + // if (isNEleNull(pDataCol, rowsToWrite)) { // all data to commit are NULL, just ignore it + if (isAllRowsNull(pDataCol)) { // all data to commit are NULL, just ignore it continue; } @@ -1382,12 +1383,12 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt while (true) { key1 = (*iter >= pDataCols->numOfRows) ? 
INT64_MAX : dataColsKeyAt(pDataCols, *iter); bool isRowDel = false; - SDataRow row = tsdbNextIterRow(pCommitIter->pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + SMemRow row = tsdbNextIterRow(pCommitIter->pIter); + if (row == NULL || memRowKey(row) > maxKey) { key2 = INT64_MAX; } else { - key2 = dataRowKey(row); - isRowDel = dataRowDeleted(row); + key2 = memRowKey(row); + isRowDel = memRowDeleted(row); } if (key1 == INT64_MAX && key2 == INT64_MAX) break; @@ -1402,24 +1403,24 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt (*iter)++; } else if (key1 > key2) { if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); ASSERT(pSchema != NULL); } - tdAppendDataRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget); } tSkipListIterNext(pCommitIter->pIter); } else { if (update) { if (!isRowDel) { - if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, dataRowVersion(row)); + if (pSchema == NULL || schemaVersion(pSchema) != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaImpl(pCommitIter->pTable, false, false, memRowVersion(row)); ASSERT(pSchema != NULL); } - tdAppendDataRowToDataCol(row, pSchema, pTarget); + tdAppendMemRowToDataCol(row, pSchema, pTarget); } } else { ASSERT(!isRowDel); diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index e25014bc1e8f2456eece3d517096cfee66886800..e45ac05e979b119ee7995cf845a6f242a9953cb3 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -158,6 +158,8 @@ static void *tsdbLoopCommit(void *arg) { STsdbRepo * pRepo = NULL; TSDB_REQ_T req; + setThreadName("tsdbCommit"); + while (true) { pthread_mutex_lock(&(pQueue->lock)); @@ -208,4 +210,4 @@ void tsdbDecCommitRef(int vgId) { int refCount = atomic_sub_fetch_32(&tsCommitQueue.refCount, 1); pthread_cond_broadcast(&(tsCommitQueue.queueNotEmpty)); tsdbDebug("vgId:%d, dec commit queue ref to %d", vgId, refCount); -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index 35e99983232161b48ab0b9d07847266ec0aff404..76f5b21fbde52e3b00f46c83a24ab7f56fa25625 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -771,7 +771,7 @@ int tsdbLoadMetaCache(STsdbRepo *pRepo, bool recoverMeta) { int64_t maxBufSize = 0; SMFInfo minfo; - taosHashEmpty(pfs->metaCache); + taosHashClear(pfs->metaCache); // No meta file, just return if (pfs->cstatus->pmf == NULL) return 0; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index d44f8ec74874fa990992fc008671878088d872f0..f3a7c4b7eecfab36eb5249107e88d0af60992f62 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -128,6 +128,8 @@ int tsdbCloseRepo(STsdbRepo *repo, int toCommit) { tsdbSyncCommit(repo); } + tsem_wait(&(pRepo->readyToCommit)); + tsdbUnRefMemTable(pRepo, pRepo->mem); tsdbUnRefMemTable(pRepo, pRepo->imem); pRepo->mem = NULL; @@ -639,7 +641,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea int numColumns; int32_t blockIdx; SDataStatis* pBlockStatis = NULL; - SDataRow row = NULL; + SMemRow row = NULL; // restore last column data with last schema int err = 0; 
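The tsdbMain.c hunks in this part of the patch replace the flat SDataRow with the SMemRow container, whose body can be either a schema-ordered data row or a sparse key-value (SKVRow) body. The sketch below is only a conceptual illustration of that tagged-container pattern under assumed, simplified layouts; every *Sketch name is a hypothetical stand-in, and the real SMemRow macros (memRowSetType, memRowDataBody, memRowKvBody, memRowTLen) live in common headers that this diff does not touch.

```c
/* Conceptual sketch only: a tagged in-memory row whose body is either a
 * schema-ordered data row or a sparse key-value row. All *Sketch names are
 * hypothetical stand-ins, not the real TDengine SMemRow macros.            */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MEM_ROW_DATA_SKETCH 0x00U /* body laid out like SDataRow */
#define MEM_ROW_KV_SKETCH   0x01U /* body laid out like SKVRow   */

typedef struct {
  uint8_t  type;   /* which body layout follows, cf. memRowSetType()/isDataRow() */
  uint32_t tlen;   /* total length of the row, cf. memRowTLen()                  */
  char     body[]; /* payload, cf. memRowDataBody()/memRowKvBody()               */
} SMemRowSketch;

static inline void  memRowSetTypeSketch(SMemRowSketch *row, uint8_t type) { row->type = type; }
static inline char *memRowBodySketch(SMemRowSketch *row) { return row->body; }
static inline int   isDataRowSketch(const SMemRowSketch *row) { return row->type == MEM_ROW_DATA_SKETCH; }

/* Allocation mirrors the shape of the patched tsdbRestoreLastColumns(): reserve
 * the maximum body size for the schema, tag the row as a data row, then
 * initialize the body (the real code calls tdInitDataRow() at this point).  */
static SMemRowSketch *allocDataMemRowSketch(uint32_t maxBodyBytes) {
  SMemRowSketch *row = malloc(sizeof(SMemRowSketch) + maxBodyBytes);
  if (row == NULL) return NULL;
  memRowSetTypeSketch(row, MEM_ROW_DATA_SKETCH);
  row->tlen = (uint32_t)sizeof(SMemRowSketch) + maxBodyBytes;
  memset(memRowBodySketch(row), 0, maxBodyBytes);
  return row;
}
```

With this shape in mind, the call sites in the following hunks read uniformly: check the tag first (isDataRow/isKvRow), then decode the body either through the data-row helpers or through the KV-row index lookup (tdGetKVRowIdxOfCol/tdGetKvRowDataOfCol).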
@@ -655,13 +657,15 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea } } - row = taosTMalloc(dataRowMaxBytesFromSchema(pSchema)); + row = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); if (row == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; err = -1; goto out; } - tdInitDataRow(row, pSchema); + + memRowSetType(row, SMEM_ROW_DATA); + tdInitDataRow(memRowDataBody(row), pSchema); // first load block index info if (tsdbLoadBlockInfo(pReadh, NULL) < 0) { @@ -718,9 +722,10 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea // OK,let's load row from backward to get not-null column for (int32_t rowId = pBlock->numOfRows - 1; rowId >= 0; rowId--) { SDataCol *pDataCol = pReadh->pDCols[0]->cols + i; - tdAppendColVal(row, tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->bytes, pCol->offset); + const void* pColData = tdGetColDataOfRow(pDataCol, rowId); + tdAppendColVal(memRowDataBody(row), pColData, pCol->type, pCol->offset); //SDataCol *pDataCol = readh.pDCols[0]->cols + j; - void* value = tdGetRowDataOfCol(row, (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); + void *value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); if (isNull(value, pCol->type)) { continue; } @@ -731,17 +736,18 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea continue; } // save not-null column + uint16_t bytes = IS_VAR_DATA_TYPE(pCol->type) ? varDataTLen(pColData) : pCol->bytes; SDataCol *pLastCol = &(pTable->lastCols[idx]); - pLastCol->pData = malloc(pCol->bytes); - pLastCol->bytes = pCol->bytes; + pLastCol->pData = malloc(bytes); + pLastCol->bytes = bytes; pLastCol->colId = pCol->colId; - memcpy(pLastCol->pData, value, pCol->bytes); + memcpy(pLastCol->pData, value, bytes); // save row ts(in column 0) pDataCol = pReadh->pDCols[0]->cols + 0; pCol = schemaColAt(pSchema, 0); - tdAppendColVal(row, tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->bytes, pCol->offset); - pLastCol->ts = dataRowKey(row); + tdAppendColVal(memRowDataBody(row), tdGetColDataOfRow(pDataCol, rowId), pCol->type, pCol->offset); + pLastCol->ts = memRowKey(row); pTable->restoreColumnNum += 1; @@ -777,18 +783,18 @@ static int tsdbRestoreLastRow(STsdbRepo *pRepo, STable *pTable, SReadH* pReadh, // Get the data in row STSchema *pSchema = tsdbGetTableSchema(pTable); - pTable->lastRow = taosTMalloc(dataRowMaxBytesFromSchema(pSchema)); + pTable->lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema)); if (pTable->lastRow == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - - tdInitDataRow(pTable->lastRow, pSchema); + memRowSetType(pTable->lastRow, SMEM_ROW_DATA); + tdInitDataRow(memRowDataBody(pTable->lastRow), pSchema); for (int icol = 0; icol < schemaNCols(pSchema); icol++) { STColumn *pCol = schemaColAt(pSchema, icol); SDataCol *pDataCol = pReadh->pDCols[0]->cols + icol; - tdAppendColVal(pTable->lastRow, tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, pCol->bytes, - pCol->offset); + tdAppendColVal(memRowDataBody(pTable->lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type, + pCol->offset); } return 0; @@ -987,4 +993,4 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) { } return 0; -} \ No newline at end of file +} diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c index 9d8b1ca7f2889f40b696f04a608dd166adf6eac6..ee9dadd9b7e2e560480d289bb029fbf6c1fc8d88 100644 --- a/src/tsdb/src/tsdbMemTable.c +++ 
b/src/tsdb/src/tsdbMemTable.c @@ -21,7 +21,7 @@ typedef struct { int32_t totalLen; int32_t len; - SDataRow row; + SMemRow row; } SSubmitBlkIter; typedef struct { @@ -36,20 +36,19 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable); static void tsdbFreeTableData(STableData *pTableData); static char * tsdbGetTsTupleKey(const void *data); static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables); -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row); +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row); static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter); -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); +static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter); static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg); static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t *affectedrows); -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow); +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow); static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter); static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock); static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable); static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **rows, int rowCounter); static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter); -static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row); - -static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, +static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row); +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey, TSKEY now); int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pRsp) { @@ -354,7 +353,7 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey TSKEY fKey = 0; bool isRowDel = false; int filterIter = 0; - SDataRow row = NULL; + SMemRow row = NULL; SMergeInfo mInfo; if (pMergeInfo == NULL) pMergeInfo = &mInfo; @@ -365,12 +364,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey if (pCols) tdResetDataCols(pCols); row = tsdbNextIterRow(pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + if (row == NULL || memRowKey(row) > maxKey) { rowKey = INT64_MAX; isRowDel = false; } else { - rowKey = dataRowKey(row); - isRowDel = dataRowDeleted(row); + rowKey = memRowKey(row); + isRowDel = memRowDeleted(row); } if (filterIter >= nFilterKeys) { @@ -407,12 +406,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey tSkipListIterNext(pIter); row = tsdbNextIterRow(pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + if (row == NULL || memRowKey(row) > maxKey) { rowKey = INT64_MAX; isRowDel = false; } else { - rowKey = dataRowKey(row); - isRowDel = dataRowDeleted(row); + rowKey = memRowKey(row); + isRowDel = memRowDeleted(row); } } else { if (isRowDel) { @@ -437,12 +436,12 @@ int tsdbLoadDataFromCache(STable *pTable, SSkipListIterator *pIter, TSKEY maxKey tSkipListIterNext(pIter); row = tsdbNextIterRow(pIter); - if (row == NULL || dataRowKey(row) > maxKey) { + if (row == NULL || memRowKey(row) > 
maxKey) { rowKey = INT64_MAX; isRowDel = false; } else { - rowKey = dataRowKey(row); - isRowDel = dataRowDeleted(row); + rowKey = memRowKey(row); + isRowDel = memRowDeleted(row); } filterIter++; @@ -548,7 +547,7 @@ static void tsdbFreeTableData(STableData *pTableData) { } } -static char *tsdbGetTsTupleKey(const void *data) { return dataRowTuple((SDataRow)data); } +static char *tsdbGetTsTupleKey(const void *data) { return memRowTuple((SMemRow)data); } static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { ASSERT(pMemTable->maxTables < maxTables); @@ -572,17 +571,17 @@ static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) { return 0; } -static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SDataRow row) { +static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema **ppSchema, SMemRow row) { if (pCols) { - if (*ppSchema == NULL || schemaVersion(*ppSchema) != dataRowVersion(row)) { - *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, dataRowVersion(row)); + if (*ppSchema == NULL || schemaVersion(*ppSchema) != memRowVersion(row)) { + *ppSchema = tsdbGetTableSchemaImpl(pTable, false, false, memRowVersion(row)); if (*ppSchema == NULL) { ASSERT(false); return -1; } } - tdAppendDataRowToDataCol(row, *ppSchema, pCols); + tdAppendMemRowToDataCol(row, *ppSchema, pCols); } return 0; @@ -592,31 +591,32 @@ static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter) { if (pBlock->dataLen <= 0) return -1; pIter->totalLen = pBlock->dataLen; pIter->len = 0; - pIter->row = (SDataRow)(pBlock->data+pBlock->schemaLen); + pIter->row = (SMemRow)(pBlock->data + pBlock->schemaLen); return 0; } -static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { - SDataRow row = pIter->row; +static SMemRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) { + SMemRow row = pIter->row; // firstly, get current row if (row == NULL) return NULL; - pIter->len += dataRowLen(row); - if (pIter->len >= pIter->totalLen) { + pIter->len += memRowTLen(row); + if (pIter->len >= pIter->totalLen) { // reach the end pIter->row = NULL; } else { - pIter->row = (char *)row + dataRowLen(row); + pIter->row = (char *)row + memRowTLen(row); // secondly, move to next row } return row; } -static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SDataRow row, TSKEY minKey, TSKEY maxKey, +static FORCE_INLINE int tsdbCheckRowRange(STsdbRepo *pRepo, STable *pTable, SMemRow row, TSKEY minKey, TSKEY maxKey, TSKEY now) { - if (dataRowKey(row) < minKey || dataRowKey(row) > maxKey) { + TSKEY rowKey = memRowKey(row); + if (rowKey < minKey || rowKey > maxKey) { tsdbError("vgId:%d table %s tid %d uid %" PRIu64 " timestamp is out of range! 
now %" PRId64 " minKey %" PRId64 " maxKey %" PRId64 " row key %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), now, minKey, maxKey, - dataRowKey(row)); + rowKey); terrno = TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE; return -1; } @@ -630,10 +630,10 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) { SSubmitMsgIter msgIter = {0}; SSubmitBlk * pBlock = NULL; SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; + SMemRow row = NULL; TSKEY now = taosGetTimestamp(pRepo->config.precision); - TSKEY minKey = now - tsMsPerDay[pRepo->config.precision] * pRepo->config.keep; - TSKEY maxKey = now + tsMsPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; + TSKEY minKey = now - tsTickPerDay[pRepo->config.precision] * pRepo->config.keep; + TSKEY maxKey = now + tsTickPerDay[pRepo->config.precision] * pRepo->config.daysPerFile; terrno = TSDB_CODE_SUCCESS; pMsg->length = htonl(pMsg->length); @@ -698,7 +698,7 @@ static int tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, int32_t * int64_t points = 0; STable * pTable = NULL; SSubmitBlkIter blkIter = {0}; - SDataRow row = NULL; + SMemRow row = NULL; void * rows[TSDB_MAX_INSERT_BATCH] = {0}; int rowCounter = 0; @@ -744,10 +744,10 @@ _err: return -1; } -static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void **ppRow) { +static int tsdbCopyRowToMem(STsdbRepo *pRepo, SMemRow row, STable *pTable, void **ppRow) { STsdbCfg * pCfg = &pRepo->config; - TKEY tkey = dataRowTKey(row); - TSKEY key = dataRowKey(row); + TKEY tkey = memRowTKey(row); + TSKEY key = memRowKey(row); bool isRowDelete = TKEY_IS_DELETED(tkey); if (isRowDelete) { @@ -765,15 +765,15 @@ static int tsdbCopyRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable, void } } - void *pRow = tsdbAllocBytes(pRepo, dataRowLen(row)); + void *pRow = tsdbAllocBytes(pRepo, memRowTLen(row)); if (pRow == NULL) { - tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %d bytes since %s", - REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), dataRowLen(row), tstrerror(terrno)); + tsdbError("vgId:%d failed to insert row with key %" PRId64 " to table %s while allocate %" PRIu32 " bytes since %s", + REPO_ID(pRepo), key, TABLE_CHAR_NAME(pTable), memRowTLen(row), tstrerror(terrno)); return -1; } - dataRowCpy(pRow, row); - ppRow[0] = pRow; + memRowCpy(pRow, row); + ppRow[0] = pRow; // save the memory address of data rows tsdbTrace("vgId:%d a row is %s table %s tid %d uid %" PRIu64 " key %" PRIu64, REPO_ID(pRepo), isRowDelete ? 
"deleted from" : "updated in", TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), @@ -932,13 +932,15 @@ static int tsdbInsertDataToTableImpl(STsdbRepo *pRepo, STable *pTable, void **ro int64_t osize = SL_SIZE(pTableData->pData); tSkipListPutBatch(pTableData->pData, rows, rowCounter); int64_t dsize = SL_SIZE(pTableData->pData) - osize; + TSKEY keyFirstRow = memRowKey(rows[0]); + TSKEY keyLastRow = memRowKey(rows[rowCounter - 1]); - if (pMemTable->keyFirst > dataRowKey(rows[0])) pMemTable->keyFirst = dataRowKey(rows[0]); - if (pMemTable->keyLast < dataRowKey(rows[rowCounter - 1])) pMemTable->keyLast = dataRowKey(rows[rowCounter - 1]); + if (pMemTable->keyFirst > keyFirstRow) pMemTable->keyFirst = keyFirstRow; + if (pMemTable->keyLast < keyLastRow) pMemTable->keyLast = keyLastRow; pMemTable->numOfRows += dsize; - if (pTableData->keyFirst > dataRowKey(rows[0])) pTableData->keyFirst = dataRowKey(rows[0]); - if (pTableData->keyLast < dataRowKey(rows[rowCounter - 1])) pTableData->keyLast = dataRowKey(rows[rowCounter - 1]); + if (pTableData->keyFirst > keyFirstRow) pTableData->keyFirst = keyFirstRow; + if (pTableData->keyLast < keyLastRow) pTableData->keyLast = keyLastRow; pTableData->numOfRows += dsize; // update table latest info @@ -954,8 +956,8 @@ static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { STsdbBufPool *pBufPool = pRepo->pPool; for (int i = rowCounter - 1; i >= 0; --i) { - SDataRow row = (SDataRow)rows[i]; - int bytes = (int)dataRowLen(row); + SMemRow row = (SMemRow)rows[i]; + int bytes = (int)memRowTLen(row); if (pRepo->mem->extraBuffList == NULL) { STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo); @@ -988,21 +990,23 @@ static void tsdbFreeRows(STsdbRepo *pRepo, void **rows, int rowCounter) { } } -static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SDataRow row) { - tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data, dataRowVersion(row)); +static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow row) { + tsdbDebug("vgId:%d updateTableLatestColumn, %s row version:%d", REPO_ID(pRepo), pTable->name->data, + memRowVersion(row)); STSchema* pSchema = tsdbGetTableLatestSchema(pTable); if (tsdbUpdateLastColSchema(pTable, pSchema) < 0) { return; } - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); if (pSchema == NULL) { return; } SDataCol *pLatestCols = pTable->lastCols; + bool isDataRow = isDataRow(row); for (int16_t j = 0; j < schemaNCols(pSchema); j++) { STColumn *pTCol = schemaColAt(pSchema, j); // ignore not exist colId @@ -1010,28 +1014,43 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SDataRow r if (idx == -1) { continue; } - - void* value = tdGetRowDataOfCol(row, (int8_t)pTCol->type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - if (isNull(value, pTCol->type)) { + + void *value = NULL; + + if (isDataRow) { + value = tdGetRowDataOfCol(memRowDataBody(row), (int8_t)pTCol->type, + TD_DATA_ROW_HEAD_SIZE + pTCol->offset); + } else { + // SKVRow + SColIdx *pColIdx = tdGetKVRowIdxOfCol(memRowKvBody(row), pTCol->colId); + if (pColIdx) { + value = tdGetKvRowDataOfCol(memRowKvBody(row), pColIdx->offset); + } + } + + if ((value == NULL) || isNull(value, pTCol->type)) { continue; } SDataCol *pDataCol = &(pLatestCols[idx]); if (pDataCol->pData == NULL) { - pDataCol->pData = malloc(pSchema->columns[j].bytes); - pDataCol->bytes = pSchema->columns[j].bytes; 
- } else if (pDataCol->bytes < pSchema->columns[j].bytes) { - pDataCol->pData = realloc(pDataCol->pData, pSchema->columns[j].bytes); - pDataCol->bytes = pSchema->columns[j].bytes; + pDataCol->pData = malloc(pTCol->bytes); + pDataCol->bytes = pTCol->bytes; + } else if (pDataCol->bytes < pTCol->bytes) { + pDataCol->pData = realloc(pDataCol->pData, pTCol->bytes); + pDataCol->bytes = pTCol->bytes; } - - memcpy(pDataCol->pData, value, pDataCol->bytes); + // the actual value size + uint16_t bytes = IS_VAR_DATA_TYPE(pTCol->type) ? varDataTLen(value) : pTCol->bytes; + // the actual data size CANNOT larger than column size + assert(pTCol->bytes >= bytes); + memcpy(pDataCol->pData, value, bytes); //tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData); - pDataCol->ts = dataRowKey(row); + pDataCol->ts = memRowKey(row); } } -static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow row) { +static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow row) { STsdbCfg *pCfg = &pRepo->config; // if cacheLastRow config has been reset, free the lastRow @@ -1042,31 +1061,31 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SDataRow TSDB_WUNLOCK_TABLE(pTable); } - if (tsdbGetTableLastKeyImpl(pTable) < dataRowKey(row)) { + if (tsdbGetTableLastKeyImpl(pTable) < memRowKey(row)) { if (CACHE_LAST_ROW(pCfg) || pTable->lastRow != NULL) { - SDataRow nrow = pTable->lastRow; - if (taosTSizeof(nrow) < dataRowLen(row)) { - SDataRow orow = nrow; - nrow = taosTMalloc(dataRowLen(row)); + SMemRow nrow = pTable->lastRow; + if (taosTSizeof(nrow) < memRowTLen(row)) { + SMemRow orow = nrow; + nrow = taosTMalloc(memRowTLen(row)); if (nrow == NULL) { terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return -1; } - dataRowCpy(nrow, row); + memRowCpy(nrow, row); TSDB_WLOCK_TABLE(pTable); - pTable->lastKey = dataRowKey(row); + pTable->lastKey = memRowKey(row); pTable->lastRow = nrow; TSDB_WUNLOCK_TABLE(pTable); taosTZfree(orow); } else { TSDB_WLOCK_TABLE(pTable); - pTable->lastKey = dataRowKey(row); - dataRowCpy(nrow, row); + pTable->lastKey = memRowKey(row); + memRowCpy(nrow, row); TSDB_WUNLOCK_TABLE(pTable); } } else { - pTable->lastKey = dataRowKey(row); + pTable->lastKey = memRowKey(row); } if (CACHE_LAST_NULL_COLUMN(pCfg)) { diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 9d5df9fb32d1eedf187e71b187815a332d8ef182..f61887a1bfe781f51668bc101265df22f204458d 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -796,7 +796,7 @@ static char *getTagIndexKey(const void *pData) { void * res = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId); if (res == NULL) { // treat the column as NULL if we cannot find it - res = getNullValue(pCol->type); + res = (char*)getNullValue(pCol->type); } return res; } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 4ac900e30ef0418e47c970c6cb9da813b6b83104..7a595da39a88c3a0934a28161c803664b03e2507 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -39,6 +39,12 @@ enum { TSDB_QUERY_TYPE_LAST = 2, }; +enum { + TSDB_CACHED_TYPE_NONE = 0, + TSDB_CACHED_TYPE_LASTROW = 1, + TSDB_CACHED_TYPE_LAST = 2, +}; + typedef struct SQueryFilePos { int32_t fid; int32_t slot; @@ -92,6 +98,8 @@ typedef struct SIOCostSummary { int64_t blockLoadTime; int64_t statisInfoLoadTime; int64_t checkForNextTime; + int64_t headFileLoad; + int64_t headFileLoadTime; } SIOCostSummary; typedef struct STsdbQueryHandle { @@ -139,7 +147,7 @@ 
typedef struct STableGroupSupporter { static STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList); static int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList); static int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle); -static int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey); +static int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey); static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle); static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SBlock* pBlock); @@ -280,9 +288,13 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa info.tableId.uid = info.pTableObj->tableId.uid; if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - assert(info.lastKey >= pQueryHandle->window.skey); + if (info.lastKey == INT64_MIN || info.lastKey < pQueryHandle->window.skey) { + info.lastKey = pQueryHandle->window.skey; + } + + assert(info.lastKey >= pQueryHandle->window.skey && info.lastKey <= pQueryHandle->window.ekey); } else { - assert(info.lastKey <= pQueryHandle->window.skey); + assert(info.lastKey >= pQueryHandle->window.ekey && info.lastKey <= pQueryHandle->window.skey); } taosArrayPush(pTableCheckInfo, &info); @@ -339,14 +351,57 @@ static SArray* createCheckInfoFromCheckInfo(STableCheckInfo* pCheckInfo, TSKEY s return pNew; } +static bool emptyQueryTimewindow(STsdbQueryHandle* pQueryHandle) { + assert(pQueryHandle != NULL); + + STimeWindow* w = &pQueryHandle->window; + bool asc = ASCENDING_TRAVERSE(pQueryHandle->order); + + return ((asc && w->skey > w->ekey) || (!asc && w->ekey > w->skey)); +} + +// Update the query time window according to the data time to live(TTL) information, in order to avoid to return +// the expired data to client, even it is queried already. 
+static int64_t getEarliestValidTimestamp(STsdbRepo* pTsdb) { + STsdbCfg* pCfg = &pTsdb->config; + + int64_t now = taosGetTimestamp(pCfg->precision); + return now - (tsTickPerDay[pCfg->precision] * pCfg->keep) + 1; // needs to add one tick +} + +static void setQueryTimewindow(STsdbQueryHandle* pQueryHandle, STsdbQueryCond* pCond) { + pQueryHandle->window = pCond->twindow; + + bool updateTs = false; + int64_t startTs = getEarliestValidTimestamp(pQueryHandle->pTsdb); + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + if (startTs > pQueryHandle->window.skey) { + pQueryHandle->window.skey = startTs; + pCond->twindow.skey = startTs; + updateTs = true; + } + } else { + if (startTs > pQueryHandle->window.ekey) { + pQueryHandle->window.ekey = startTs; + pCond->twindow.ekey = startTs; + updateTs = true; + } + } + + if (updateTs) { + tsdbDebug("%p update the query time window, old:%" PRId64 " - %" PRId64 ", new:%" PRId64 " - %" PRId64 + ", 0x%" PRIx64, pQueryHandle, pCond->twindow.skey, pCond->twindow.ekey, pQueryHandle->window.skey, + pQueryHandle->window.ekey, pQueryHandle->qId); + } +} + static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, uint64_t qId, SMemRef* pMemRef) { STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); if (pQueryHandle == NULL) { - goto out_of_memory; + goto _end; } pQueryHandle->order = pCond->order; - pQueryHandle->window = pCond->twindow; pQueryHandle->pTsdb = tsdb; pQueryHandle->type = TSDB_QUERY_TYPE_ALL; pQueryHandle->cur.fid = INT32_MIN; @@ -354,36 +409,33 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC pQueryHandle->checkFiles = true; pQueryHandle->activeIndex = 0; // current active table index pQueryHandle->qId = qId; - pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->allocSize = 0; pQueryHandle->locateStart = false; pQueryHandle->pMemRef = pMemRef; + pQueryHandle->loadType = pCond->type; + + pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->loadExternalRow = pCond->loadExternalRows; pQueryHandle->currentLoadExternalRows = pCond->loadExternalRows; - pQueryHandle->loadType = pCond->type; - if (tsdbInitReadH(&pQueryHandle->rhelper, (STsdbRepo*)tsdb) != 0) { - goto out_of_memory; + goto _end; } assert(pCond != NULL && pMemRef != NULL); - if (ASCENDING_TRAVERSE(pCond->order)) { - assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey); - } else { - assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey); - } + setQueryTimewindow(pQueryHandle, pCond); + if (pCond->numOfCols > 0) { // allocate buffer in order to load data blocks from file pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis)); if (pQueryHandle->statis == NULL) { - goto out_of_memory; + goto _end; } - pQueryHandle->pColumns = - taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); // todo: use list instead of array? + // todo: use list instead of array? 
+ pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData)); if (pQueryHandle->pColumns == NULL) { - goto out_of_memory; + goto _end; } for (int32_t i = 0; i < pCond->numOfCols; ++i) { @@ -392,14 +444,16 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC colInfo.info = pCond->colList[i]; colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes); if (colInfo.pData == NULL) { - goto out_of_memory; + goto _end; } + taosArrayPush(pQueryHandle->pColumns, &colInfo); pQueryHandle->statis[i].colId = colInfo.info.colId; } pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true); } + STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); @@ -407,7 +461,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC if (pQueryHandle->pDataCols == NULL) { tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; - goto out_of_memory; + goto _end; } tsdbInitDataBlockLoadInfo(&pQueryHandle->dataBlockLoadInfo); @@ -415,7 +469,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC return (TsdbQueryHandleT) pQueryHandle; - out_of_memory: + _end: tsdbCleanupQueryHandle(pQueryHandle); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; @@ -423,6 +477,9 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, uint64_t qId, SMemRef* pRef) { STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qId, pRef); + if (emptyQueryTimewindow(pQueryHandle)) { + return (TsdbQueryHandleT*) pQueryHandle; + } STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); @@ -433,6 +490,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable pQueryHandle->pTableCheckInfo = createCheckInfoFromTableGroup(pQueryHandle, groupList, pMeta, &psTable); if (pQueryHandle->pTableCheckInfo == NULL) { tsdbCleanupQueryHandle(pQueryHandle); + taosArrayDestroy(psTable); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; return NULL; } @@ -446,6 +504,15 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable void tsdbResetQueryHandle(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond) { STsdbQueryHandle* pQueryHandle = queryHandle; + if (emptyQueryTimewindow(pQueryHandle)) { + if (pCond->order != pQueryHandle->order) { + pQueryHandle->order = pCond->order; + SWAP(pQueryHandle->window.skey, pQueryHandle->window.ekey, int64_t); + } + + return; + } + pQueryHandle->order = pCond->order; pQueryHandle->window = pCond->twindow; pQueryHandle->type = TSDB_QUERY_TYPE_ALL; @@ -511,8 +578,6 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); } - - TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) { pCond->twindow = updateLastrowForEachGroup(groupList); @@ -575,7 +640,7 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr size_t numOfGroup = taosArrayGetSize(pGroupList->pGroupList); STableGroupInfo* pNew = calloc(1, sizeof(STableGroupInfo)); - pNew->pGroupList = taosArrayInit(numOfGroup, sizeof(SArray)); + pNew->pGroupList = taosArrayInit(numOfGroup, POINTER_BYTES); for(int32_t i = 0; i < numOfGroup; ++i) { SArray* oneGroup = 
taosArrayGetP(pGroupList->pGroupList, i); @@ -669,8 +734,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); assert(node != NULL); - SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); - TSKEY key = dataRowKey(row); // first timestamp in buffer + SMemRow row = (SMemRow)SL_GET_NODE_DATA(node); + TSKEY key = memRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast, @@ -691,8 +756,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); assert(node != NULL); - SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); - TSKEY key = dataRowKey(row); // first timestamp in buffer + SMemRow row = (SMemRow)SL_GET_NODE_DATA(node); + TSKEY key = memRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast, @@ -716,19 +781,19 @@ static void destroyTableMemIterator(STableCheckInfo* pCheckInfo) { tSkipListDestroyIter(pCheckInfo->iiter); } -static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { - SDataRow rmem = NULL, rimem = NULL; +static SMemRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update) { + SMemRow rmem = NULL, rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); if (node != NULL) { - rmem = (SDataRow)SL_GET_NODE_DATA(node); + rmem = (SMemRow)SL_GET_NODE_DATA(node); } } if (pCheckInfo->iiter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); if (node != NULL) { - rimem = (SDataRow)SL_GET_NODE_DATA(node); + rimem = (SMemRow)SL_GET_NODE_DATA(node); } } @@ -746,8 +811,8 @@ static SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order return rimem; } - TSKEY r1 = dataRowKey(rmem); - TSKEY r2 = dataRowKey(rimem); + TSKEY r1 = memRowKey(rmem); + TSKEY r2 = memRowKey(rimem); if (r1 == r2) { // data ts are duplicated, ignore the data in mem if (!update) { @@ -826,12 +891,12 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { initTableMemIterator(pHandle, pCheckInfo); } - SDataRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pHandle->order, pCfg->update); if (row == NULL) { return false; } - pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer + pCheckInfo->lastKey = memRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, 0x%"PRIx64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qId); @@ -864,10 +929,10 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio } if (key < 0) { - key -= (daysPerFile * tsMsPerDay[precision]); + key -= (daysPerFile * tsTickPerDay[precision]); } - int64_t fid = (int64_t)(key / (daysPerFile * tsMsPerDay[precision])); // set the starting fileId + int64_t fid = 
(int64_t)(key / (daysPerFile * tsTickPerDay[precision])); // set the starting fileId if (fid < 0L && llabs(fid) > INT32_MAX) { // data value overflow for INT32 fid = INT32_MIN; } @@ -982,15 +1047,21 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo int32_t code = TSDB_CODE_SUCCESS; *numOfBlocks = 0; + pQueryHandle->cost.headFileLoad += 1; + int64_t s = taosGetTimestampUs(); + size_t numOfTables = 0; if (pQueryHandle->loadType == BLOCK_LOAD_TABLE_SEQ_ORDER) { - code = loadBlockInfo(pQueryHandle, pQueryHandle->activeIndex, numOfBlocks); + code = loadBlockInfo(pQueryHandle, pQueryHandle->activeIndex, numOfBlocks); } else if (pQueryHandle->loadType == BLOCK_LOAD_OFFSET_SEQ_ORDER) { numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo); for (int32_t i = 0; i < numOfTables; ++i) { code = loadBlockInfo(pQueryHandle, i, numOfBlocks); if (code != TSDB_CODE_SUCCESS) { + int64_t e = taosGetTimestampUs(); + + pQueryHandle->cost.headFileLoadTime += (e - s); return code; } } @@ -998,6 +1069,8 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo assert(0); } + int64_t e = taosGetTimestampUs(); + pQueryHandle->cost.headFileLoadTime += (e - s); return code; } @@ -1082,11 +1155,11 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p int32_t code = TSDB_CODE_SUCCESS; /*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo); - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); assert(cur->pos >= 0 && cur->pos <= binfo.rows); - TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL; + TSKEY key = (row != NULL) ? memRowKey(row) : TSKEY_INITIAL_VAL; if (key != TSKEY_INITIAL_VAL) { tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId); } else { @@ -1171,8 +1244,9 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, STableCheckInfo* pCheckInfo, bool* exists) { SQueryFilePos* cur = &pQueryHandle->cur; int32_t code = TSDB_CODE_SUCCESS; + bool asc = ASCENDING_TRAVERSE(pQueryHandle->order); - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + if (asc) { // query ended in/started from current block if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) { if ((code = doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo, cur->slot)) != TSDB_CODE_SUCCESS) { @@ -1193,7 +1267,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, assert(pCheckInfo->lastKey <= pBlock->keyLast); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { // the whole block is loaded in to buffer - cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(pBlock->numOfRows - 1); + cur->pos = asc? 0:(pBlock->numOfRows - 1); code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } else { //desc order, query ended in current block @@ -1213,7 +1287,7 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock, assert(pCheckInfo->lastKey >= pBlock->keyFirst); doMergeTwoLevelData(pQueryHandle, pCheckInfo, pBlock); } else { - cur->pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0:(pBlock->numOfRows-1); + cur->pos = asc? 
0:(pBlock->numOfRows-1); code = handleDataMergeIfNeeded(pQueryHandle, pBlock, pCheckInfo); } } @@ -1327,7 +1401,7 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity // todo refactor, only copy one-by-one for (int32_t k = start; k < num + start; ++k) { - char* p = tdGetColDataOfRow(src, k); + const char* p = tdGetColDataOfRow(src, k); memcpy(dst, p, varDataTLen(p)); dst += bytes; } @@ -1378,88 +1452,170 @@ int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity return numOfRows + num; } -static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SDataRow row, +static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, SMemRow row, int32_t numOfCols, STable* pTable, STSchema* pSchema) { char* pData = NULL; - // the schema version info is embeded in SDataRow + // the schema version info is embedded in SDataRow, and use latest schema version for SKVRow int32_t numOfRowCols = 0; if (pSchema == NULL) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); numOfRowCols = schemaNCols(pSchema); } else { numOfRowCols = schemaNCols(pSchema); } - - int32_t i = 0, j = 0; - while(i < numOfCols && j < numOfRowCols) { - SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); - if (pSchema->columns[j].colId < pColInfo->info.colId) { - j++; - continue; - } - if (ASCENDING_TRAVERSE(pQueryHandle->order)) { - pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; - } else { - pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + int32_t i = 0; + + if (isDataRow(row)) { + SDataRow dataRow = memRowDataBody(row); + int32_t j = 0; + while (i < numOfCols && j < numOfRowCols) { + SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + if (pSchema->columns[j].colId < pColInfo->info.colId) { + j++; + continue; + } + + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; + } else { + pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + } + + if (pSchema->columns[j].colId == pColInfo->info.colId) { + void* value = + tdGetRowDataOfCol(dataRow, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t*)pData = *(uint8_t*)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t*)pData = *(uint16_t*)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t*)pData = *(uint32_t*)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t*)pData = *(uint64_t*)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY*)pData = tdGetKey(*(TKEY*)value); + } else { + *(TSKEY*)pData = *(TSKEY*)value; + } + break; + default: + memcpy(pData, value, pColInfo->info.bytes); + } + + j++; + i++; + } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a 
NULL data + if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull(pData, pColInfo->info.type); + } else { + setNull(pData, pColInfo->info.type, pColInfo->info.bytes); + } + i++; + } } + } else if (isKvRow(row)) { + SKVRow kvRow = memRowKvBody(row); + int32_t k = 0; + int32_t nKvRowCols = kvRowNCols(kvRow); - if (pSchema->columns[j].colId == pColInfo->info.colId) { - void* value = tdGetRowDataOfCol(row, (int8_t)pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset); - switch (pColInfo->info.type) { - case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: - memcpy(pData, value, varDataTLen(value)); - break; - case TSDB_DATA_TYPE_NULL: - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: - *(uint8_t *)pData = *(uint8_t *)value; - break; - case TSDB_DATA_TYPE_SMALLINT: - case TSDB_DATA_TYPE_USMALLINT: - *(uint16_t *)pData = *(uint16_t *)value; - break; - case TSDB_DATA_TYPE_INT: - case TSDB_DATA_TYPE_UINT: - *(uint32_t *)pData = *(uint32_t *)value; - break; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - *(uint64_t *)pData = *(uint64_t *)value; - break; - case TSDB_DATA_TYPE_FLOAT: - SET_FLOAT_PTR(pData, value); - break; - case TSDB_DATA_TYPE_DOUBLE: - SET_DOUBLE_PTR(pData, value); - break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { - *(TSKEY *)pData = tdGetKey(*(TKEY *)value); - } else { - *(TSKEY *)pData = *(TSKEY *)value; - } - break; - default: - memcpy(pData, value, pColInfo->info.bytes); + while (i < numOfCols && k < nKvRowCols) { + SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); + SColIdx* pColIdx = kvRowColIdxAt(kvRow, k); + + if (pColIdx->colId < pColInfo->info.colId) { + ++k; + continue; } - j++; - i++; - } else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data + if (ASCENDING_TRAVERSE(pQueryHandle->order)) { + pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; + } else { + pData = (char*)pColInfo->pData + (capacity - numOfRows - 1) * pColInfo->info.bytes; + } + + if (pColIdx->colId == pColInfo->info.colId) { + // offset of pColIdx for SKVRow including the TD_KV_ROW_HEAD_SIZE + void* value = tdGetKvRowDataOfCol(kvRow, pColIdx->offset); + switch (pColInfo->info.type) { + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + memcpy(pData, value, varDataTLen(value)); + break; + case TSDB_DATA_TYPE_NULL: + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: + *(uint8_t*)pData = *(uint8_t*)value; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + *(uint16_t*)pData = *(uint16_t*)value; + break; + case TSDB_DATA_TYPE_INT: + case TSDB_DATA_TYPE_UINT: + *(uint32_t*)pData = *(uint32_t*)value; + break; + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + *(uint64_t*)pData = *(uint64_t*)value; + break; + case TSDB_DATA_TYPE_FLOAT: + SET_FLOAT_PTR(pData, value); + break; + case TSDB_DATA_TYPE_DOUBLE: + SET_DOUBLE_PTR(pData, value); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + if (pColInfo->info.colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + *(TSKEY*)pData = tdGetKey(*(TKEY*)value); + } else { + *(TSKEY*)pData = *(TSKEY*)value; + } + break; + default: + memcpy(pData, value, pColInfo->info.bytes); + } + ++k; + ++i; + continue; + } + // If (pColInfo->info.colId < pColIdx->colId), it is NULL data if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == 
TSDB_DATA_TYPE_NCHAR) { setVardataNull(pData, pColInfo->info.type); } else { setNull(pData, pColInfo->info.type, pColInfo->info.bytes); } - i++; + ++i; } + } else { + ASSERT(0); } - while (i < numOfCols) { // the remain columns are all null data + while (i < numOfCols) { // the remain columns are all null data SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i); if (ASCENDING_TRAVERSE(pQueryHandle->order)) { pData = (char*)pColInfo->pData + numOfRows * pColInfo->info.bytes; @@ -1476,7 +1632,6 @@ static void copyOneRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, i++; } } - static void moveDataToFront(STsdbQueryHandle* pQueryHandle, int32_t numOfRows, int32_t numOfCols) { if (numOfRows == 0 || ASCENDING_TRAVERSE(pQueryHandle->order)) { return; @@ -1656,12 +1811,12 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* } else if (pCheckInfo->iter != NULL || pCheckInfo->iiter != NULL) { SSkipListNode* node = NULL; do { - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); if (row == NULL) { break; } - TSKEY key = dataRowKey(row); + TSKEY key = memRowKey(row); if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) { break; @@ -1674,11 +1829,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) { - if (rv != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - rv = dataRowVersion(row); - } - + if (rv != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + rv = memRowVersion(row); + } + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -1692,11 +1847,11 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it if (pCfg->update) { - if (rv != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - rv = dataRowVersion(row); + if (rv != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + rv = memRowVersion(row); } - + copyOneRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row, numOfCols, pTable, pSchema); numOfRows += 1; if (cur->win.skey == TSKEY_INITIAL_VAL) { @@ -1746,8 +1901,10 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* * copy them all to result buffer, since it may be overlapped with file data block. 
*/ if (node == NULL || - ((dataRowKey((SDataRow)SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) || - ((dataRowKey((SDataRow)SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) { + ((memRowKey((SMemRow)SL_GET_NODE_DATA(node)) > pQueryHandle->window.ekey) && + ASCENDING_TRAVERSE(pQueryHandle->order)) || + ((memRowKey((SMemRow)SL_GET_NODE_DATA(node)) < pQueryHandle->window.ekey) && + !ASCENDING_TRAVERSE(pQueryHandle->order))) { // no data in cache or data in cache is greater than the ekey of time window, load data from file block if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = tsArray[pos]; @@ -2333,12 +2490,12 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STSchema* pSchema = NULL; do { - SDataRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); + SMemRow row = getSDataRowInTableMem(pCheckInfo, pQueryHandle->order, pCfg->update); if (row == NULL) { break; } - TSKEY key = dataRowKey(row); + TSKEY key = memRowKey(row); if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) || (key < maxKey && !ASCENDING_TRAVERSE(pQueryHandle->order))) { tsdbDebug("%p key:%"PRIu64" beyond qrange:%"PRId64" - %"PRId64", no more data in buffer", pQueryHandle, key, pQueryHandle->window.skey, pQueryHandle->window.ekey); @@ -2351,9 +2508,9 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } win->ekey = key; - if (rv != dataRowVersion(row)) { - pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row)); - rv = dataRowVersion(row); + if (rv != memRowVersion(row)) { + pSchema = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row)); + rv = memRowVersion(row); } copyOneRowFromMem(pQueryHandle, maxRowsToRead, numOfRows, row, numOfCols, pTable, pSchema); @@ -2470,7 +2627,7 @@ static bool loadCachedLastRow(STsdbQueryHandle* pQueryHandle) { SQueryFilePos* cur = &pQueryHandle->cur; - SDataRow pRow = NULL; + SMemRow pRow = NULL; TSKEY key = TSKEY_INITIAL_VAL; int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 
1:-1; @@ -2684,13 +2841,19 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) { bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) { STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle; + if (emptyQueryTimewindow(pQueryHandle)) { + tsdbDebug("%p query window not overlaps with the data set, no result returned, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId); + return false; + } + int64_t stime = taosGetTimestampUs(); int64_t elapsedTime = stime; + // TODO refactor: remove "type" if (pQueryHandle->type == TSDB_QUERY_TYPE_LAST) { - if (pQueryHandle->cachelastrow == 1) { + if (pQueryHandle->cachelastrow == TSDB_CACHED_TYPE_LASTROW) { return loadCachedLastRow(pQueryHandle); - } else if (pQueryHandle->cachelastrow == 2) { + } else if (pQueryHandle->cachelastrow == TSDB_CACHED_TYPE_LAST) { return loadCachedLast(pQueryHandle); } } @@ -2803,6 +2966,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM SArray* psTable = NULL; pSecQueryHandle->pTableCheckInfo = createCheckInfoFromCheckInfo(pCurrent, pSecQueryHandle->window.skey, &psTable); if (pSecQueryHandle->pTableCheckInfo == NULL) { + taosArrayDestroy(psTable); terrno = TSDB_CODE_QRY_OUT_OF_MEMORY; goto out_of_memory; } @@ -2873,7 +3037,7 @@ bool tsdbGetExternalRow(TsdbQueryHandleT pHandle) { * if lastRow == NULL, return TSDB_CODE_TDB_NO_CACHE_LAST_ROW * else set pRes and return TSDB_CODE_SUCCESS and save lastKey */ -int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey) { +int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey) { int32_t code = TSDB_CODE_SUCCESS; TSDB_RLOCK_TABLE(pTable); @@ -2884,7 +3048,7 @@ int32_t tsdbGetCachedLastRow(STable* pTable, SDataRow* pRes, TSKEY* lastKey) { } if (pRes) { - *pRes = tdDataRowDup(pTable->lastRow); + *pRes = tdMemRowDup(pTable->lastRow); if (*pRes == NULL) { code = TSDB_CODE_TDB_OUT_OF_MEMORY; } @@ -2896,7 +3060,7 @@ out: } bool isTsdbCacheLastRow(TsdbQueryHandleT* pQueryHandle) { - return ((STsdbQueryHandle *)pQueryHandle)->cachelastrow > 0; + return ((STsdbQueryHandle *)pQueryHandle)->cachelastrow > TSDB_CACHED_TYPE_NONE; } int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList) { @@ -2914,14 +3078,14 @@ int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *g if (((STable*)pInfo->pTable)->lastRow) { code = tsdbGetCachedLastRow(pInfo->pTable, NULL, &key); if (code != TSDB_CODE_SUCCESS) { - pQueryHandle->cachelastrow = 0; + pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_NONE; } else { - pQueryHandle->cachelastrow = 1; + pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LASTROW; } } // update the tsdb query time range - if (pQueryHandle->cachelastrow) { + if (pQueryHandle->cachelastrow != TSDB_CACHED_TYPE_NONE) { pQueryHandle->window = TSWINDOW_INITIALIZER; pQueryHandle->checkFiles = false; pQueryHandle->activeIndex = -1; // start from -1 @@ -2936,12 +3100,11 @@ int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle) { int32_t code = 0; if (pQueryHandle->pTsdb && atomic_load_8(&pQueryHandle->pTsdb->hasCachedLastColumn)){ - pQueryHandle->cachelastrow = 2; + pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LAST; } // update the tsdb query time range if (pQueryHandle->cachelastrow) { - pQueryHandle->window = TSWINDOW_INITIALIZER; pQueryHandle->checkFiles = false; pQueryHandle->activeIndex = -1; // start from -1 } @@ -2954,6 +3117,7 @@ STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList) { STimeWindow window = {INT64_MAX, INT64_MIN}; int32_t 
totalNumOfTable = 0; + SArray* emptyGroup = taosArrayInit(16, sizeof(int32_t)); // NOTE: starts from the buffer in case of descending timestamp order check data blocks size_t numOfGroups = taosArrayGetSize(groupList->pGroupList); @@ -2996,27 +3160,30 @@ STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList) { } } - taosArrayClear(pGroup); - // more than one table in each group, only one table left for each group if (keyInfo.pTable != NULL) { totalNumOfTable++; - taosArrayPush(pGroup, &keyInfo); - } else { + if (taosArrayGetSize(pGroup) == 1) { + // do nothing + } else { + taosArrayClear(pGroup); + taosArrayPush(pGroup, &keyInfo); + } + } else { // mark all the empty groups, and remove it later taosArrayDestroy(pGroup); - - taosArrayRemove(groupList->pGroupList, j); - numOfGroups -= 1; - j -= 1; + taosArrayPush(emptyGroup, &j); } } // window does not being updated, so set the original if (window.skey == INT64_MAX && window.ekey == INT64_MIN) { window = TSWINDOW_INITIALIZER; - assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == 0); + assert(totalNumOfTable == 0 && taosArrayGetSize(groupList->pGroupList) == numOfGroups); } + taosArrayRemoveBatch(groupList->pGroupList, TARRAY_GET_START(emptyGroup), (int32_t) taosArrayGetSize(emptyGroup)); + taosArrayDestroy(emptyGroup); + groupList->numOfTables = totalNumOfTable; return window; } @@ -3216,11 +3383,13 @@ static int32_t tableGroupComparFn(const void *p1, const void *p2, const void *pa type = TSDB_DATA_TYPE_BINARY; bytes = tGetTbnameColumnSchema()->bytes; } else { - STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); - bytes = pCol->bytes; - type = pCol->type; - f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); - f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); + if (pTableGroupSupp->pTagSchema && colIndex < pTableGroupSupp->pTagSchema->numOfCols) { + STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); + bytes = pCol->bytes; + type = pCol->type; + f1 = tdGetKVRowValOfCol(pTable1->tagVal, pCol->colId); + f2 = tdGetKVRowValOfCol(pTable2->tagVal, pCol->colId); + } } // this tags value may be NULL @@ -3355,11 +3524,16 @@ static bool tableFilterFp(const void* pNode, void* param) { } } else if (pInfo->optr == TSDB_RELATION_IN) { int type = pInfo->sch.type; - if (type == TSDB_DATA_TYPE_BOOL || type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_SMALLINT || type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_INT) { + if (type == TSDB_DATA_TYPE_BOOL || IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_TIMESTAMP) { int64_t v; GET_TYPED_DATA(v, int64_t, pInfo->sch.type, val); return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); - } else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_DOUBLE) { + } else if (IS_UNSIGNED_NUMERIC_TYPE(type)) { + uint64_t v; + GET_TYPED_DATA(v, uint64_t, pInfo->sch.type, val); + return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); + } + else if (type == TSDB_DATA_TYPE_DOUBLE || type == TSDB_DATA_TYPE_FLOAT) { double v; GET_TYPED_DATA(v, double, pInfo->sch.type, val); return NULL != taosHashGet((SHashObj *)pInfo->q, (char *)&v, sizeof(v)); @@ -3543,7 +3717,6 @@ int32_t tsdbGetOneTableGroup(STsdbRepo* tsdb, uint64_t uid, TSKEY startKey, STab taosArrayPush(group, &info); taosArrayPush(pGroupInfo->pGroupList, &group); - return TSDB_CODE_SUCCESS; _error: @@ -3614,6 +3787,10 @@ static void* doFreeColumnInfoData(SArray* pColumnInfoData) { } static void* destroyTableCheckInfo(SArray* 
pTableCheckInfo) { + if (pTableCheckInfo == NULL) { + return NULL; + } + size_t size = taosArrayGetSize(pTableCheckInfo); for (int32_t i = 0; i < size; ++i) { STableCheckInfo* p = taosArrayGet(pTableCheckInfo, i); @@ -3632,15 +3809,21 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { return; } - pQueryHandle->pTableCheckInfo = destroyTableCheckInfo(pQueryHandle->pTableCheckInfo); pQueryHandle->pColumns = doFreeColumnInfoData(pQueryHandle->pColumns); taosArrayDestroy(pQueryHandle->defaultLoadColumn); tfree(pQueryHandle->pDataBlockInfo); tfree(pQueryHandle->statis); - // todo check error - tsdbMayUnTakeMemSnapshot(pQueryHandle); + if (!emptyQueryTimewindow(pQueryHandle)) { + tsdbMayUnTakeMemSnapshot(pQueryHandle); + } else { + assert(pQueryHandle->pTableCheckInfo == NULL); + } + + if (pQueryHandle->pTableCheckInfo != NULL) { + pQueryHandle->pTableCheckInfo = destroyTableCheckInfo(pQueryHandle->pTableCheckInfo); + } tsdbDestroyReadH(&pQueryHandle->rhelper); @@ -3651,8 +3834,8 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); SIOCostSummary* pCost = &pQueryHandle->cost; - tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64, - pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); + tsdbDebug("%p :io-cost summary: head-file read cnt:%"PRIu64", head-file time:%"PRIu64" us, statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, 0x%"PRIx64, + pQueryHandle, pCost->headFileLoad, pCost->headFileLoadTime, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); tfree(pQueryHandle); } diff --git a/src/tsdb/tests/tsdbTests.cpp b/src/tsdb/tests/tsdbTests.cpp index ac254d6c34ecd152e28bb621348312b938fecd20..dc804856fdf1e288f2a70a2813639c1324f42851 100644 --- a/src/tsdb/tests/tsdbTests.cpp +++ b/src/tsdb/tests/tsdbTests.cpp @@ -55,10 +55,10 @@ static int insertData(SInsertInfo *pInfo) { for (int j = 0; j < schemaNCols(pInfo->pSchema); j++) { STColumn *pTCol = schemaColAt(pInfo->pSchema, j); if (j == 0) { // Just for timestamp - tdAppendColVal(row, (void *)(&start_time), pTCol->type, pTCol->bytes, pTCol->offset); + tdAppendColVal(row, (void *)(&start_time), pTCol->type, pTCol->offset); } else { // For int int val = 10; - tdAppendColVal(row, (void *)(&val), pTCol->type, pTCol->bytes, pTCol->offset); + tdAppendColVal(row, (void *)(&val), pTCol->type, pTCol->offset); } } pBlock->dataLen += dataRowLen(row); diff --git a/src/util/CMakeLists.txt b/src/util/CMakeLists.txt index e8a1d61ee52c6461e88f6cdc16069b2b6b523ab5..ef304d2fcbcb3a823e2c8253ca578de551499151 100644 --- a/src/util/CMakeLists.txt +++ b/src/util/CMakeLists.txt @@ -1,16 +1,21 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/rpc/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/sync/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/rmonotonic/inc) +INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/TSZ/sz/include) + AUX_SOURCE_DIRECTORY(src SRC) ADD_LIBRARY(tutil ${SRC}) -TARGET_LINK_LIBRARIES(tutil pthread os lz4 z rmonotonic) + +TARGET_LINK_LIBRARIES(tutil pthread os lz4 z rmonotonic ${VAR_TSZ} ) + + IF (TD_LINUX) TARGET_LINK_LIBRARIES(tutil m rt) - # ADD_SUBDIRECTORY(tests) + ADD_SUBDIRECTORY(tests) FIND_PATH(ICONV_INCLUDE_EXIST iconv.h /usr/include/ /usr/local/include/) IF (ICONV_INCLUDE_EXIST) @@ 
-36,4 +41,4 @@ ENDIF() IF (TD_STORAGE) TARGET_LINK_LIBRARIES(tutil storage) -ENDIF () \ No newline at end of file +ENDIF () diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index c37426069cf64bb35ac5e1100d49e8103c851625..616b844c1388575130a2b1c02033cfedb7ef9e57 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -140,7 +140,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param); -void taosHashEmpty(SHashObj *pHashObj); +void taosHashClear(SHashObj *pHashObj); /** * clean up hash table diff --git a/src/util/inc/talgo.h b/src/util/inc/talgo.h index 9e3692225b6413353bf269d9ba1fbc8651273eb5..4aa54306052bfe224d81ac90f8310de7ac85f8eb 100644 --- a/src/util/inc/talgo.h +++ b/src/util/inc/talgo.h @@ -34,6 +34,7 @@ typedef int (*__compar_fn_t) (const void *, const void *); #define elePtrAt(base, size, idx) (void *)((char *)(base) + (size) * (idx)) typedef int32_t (*__ext_compar_fn_t)(const void *p1, const void *p2, const void *param); +typedef void (*__ext_swap_fn_t)(void *p1, void *p2, const void *param); /** * quick sort, with the compare function requiring additional parameters support @@ -59,6 +60,38 @@ void taosqsort(void *src, size_t numOfElem, size_t size, const void* param, __ex */ void *taosbsearch(const void *key, const void *base, size_t nmemb, size_t size, __compar_fn_t fn, int flags); +/** + * adjust heap + * + * @param base: the start address of array + * @param size: size of every item in array + * @param start: the first index + * @param end: the last index + * @param parcompar: parameters for compare function + * @param compar: user defined compare function + * @param parswap: parameters for swap function + * @param swap: user defined swap function, the default swap function doswap will be used if swap is NULL + * @param maxroot: if heap is max root heap + * @return + */ +void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot); + +/** + * sort heap to make sure it is a max/min root heap + * + * @param base: the start address of array + * @param size: size of every item in array + * @param len: the length of array + * @param parcompar: parameters for compare function + * @param compar: user defined compare function + * @param parswap: parameters for swap function + * @param swap: user defined swap function, the default swap function doswap will be used if swap is NULL + * @param maxroot: if heap is max root heap + * @return + */ +void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot); + + #ifdef __cplusplus } #endif diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index fc7b6b85841065044a0897ee3ebaa4d7cb84e53b..2da74eac820e28206cb3e2b7cb6f2c4fb9f481b8 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -52,6 +52,22 @@ void* taosArrayInit(size_t size, size_t elemSize); */ void *taosArrayAddBatch(SArray *pArray, const void *pData, int nEles); +/** + * + * @param pArray + * @param pData position array list + * @param numOfElems the number of removed position + */ +void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfElems); + +/** + * + * @param pArray + * @param comparFn + * @param fp + */ +void taosArrayRemoveDuplicate(SArray *pArray, __compar_fn_t comparFn, void 
(*fp)(void*)); + /** * add all element from the source array list into the destination * @param pArray @@ -197,8 +213,21 @@ void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t compa */ char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int flags); + +/** + * sort the pointer data in the array + * @param pArray + * @param compar + * @param param + * @return + */ + +void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param); + #ifdef __cplusplus } #endif + + #endif // TDENGINE_TAOSARRAY_H diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h index efd51f90ce8739050971856dd4f2dbdd1c44d5a4..e41b544d00e55f7eece904c5957ef9c06063e6c3 100644 --- a/src/util/inc/tcache.h +++ b/src/util/inc/tcache.h @@ -83,6 +83,7 @@ typedef struct { uint8_t deleting; // set the deleting flag to stop refreshing ASAP. pthread_t refreshWorker; bool extendLifespan; // auto extend life span when one item is accessed. + int64_t checkTick; // tick used to record the check times of the refresh threads #if defined(LINUX) pthread_rwlock_t lock; #else @@ -177,6 +178,11 @@ void taosCacheCleanup(SCacheObj *pCacheObj); */ void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp); +/** + * stop background refresh worker thread + */ +void taosStopCacheRefreshWorker(); + #ifdef __cplusplus } #endif diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index fdb2595fd8d3b1659800ca3a5b88c32b6fe95e93..d03ce6e0f1f34478951a84b2ab18020f5cbec92b 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 110 +#define TSDB_CFG_MAX_NUM 116 // 110 + 6 with lossy option #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 @@ -32,6 +32,9 @@ extern "C" { #define TSDB_CFG_CTYPE_B_OPTION 16U // can be configured by taos_options function #define TSDB_CFG_CTYPE_B_NOT_PRINT 32U // such as password +#define MAX_FLOAT 100000 +#define MIN_FLOAT 0 + enum { TAOS_CFG_CSTATUS_NONE, // not configured TAOS_CFG_CSTATUS_DEFAULT, // use system default value @@ -50,6 +53,7 @@ enum { TAOS_CFG_VTYPE_IPSTR, TAOS_CFG_VTYPE_DIRECTORY, TAOS_CFG_VTYPE_DATA_DIRCTORY, + TAOS_CFG_VTYPE_DOUBLE, }; enum { diff --git a/src/util/inc/tsched.h b/src/util/inc/tsched.h index 3e481cbc327b495975fb03bc4e4d850e4372f044..a1591512c1f87f524837a7986e3c8b3e14e25924 100644 --- a/src/util/inc/tsched.h +++ b/src/util/inc/tsched.h @@ -28,10 +28,41 @@ typedef struct SSchedMsg { void *thandle; } SSchedMsg; -void *taosInitScheduler(int queueSize, int numOfThreads, const char *label); -void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl); -int taosScheduleTask(void *qhandle, SSchedMsg *pMsg); -void taosCleanUpScheduler(void *param); +/** + * Create a thread-safe ring-buffer based task queue and return the instance. A thread + * pool will be created to consume the messages in the queue. + * @param capacity the queue capacity + * @param numOfThreads the number of threads for the thread pool + * @param label the label of the queue + * @return the created queue scheduler + */ +void *taosInitScheduler(int capacity, int numOfThreads, const char *label); + +/** + * Create a thread-safe ring-buffer based task queue and return the instance. + * Same as taosInitScheduler, and it also print the queue status every 1 minite. 
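The heap helpers declared in talgo.h above take the element width, an index range, a comparator carrying a user parameter, and an optional swap callback; passing NULL for swap falls back to the library's internal doswap. A minimal usage sketch (editorial, assuming only that talgo.h is on the include path): with maxroot set to true the call heapifies the array, so the largest element lands at index 0 (the full pop loop in talgo.c is left commented out, so this is a heapify rather than a complete sort).

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "talgo.h"

/* comparator matching __ext_compar_fn_t; no extra context is needed for ints */
static int32_t compareInt32(const void *p1, const void *p2, const void *param) {
  (void)param;
  int32_t a = *(const int32_t *)p1, b = *(const int32_t *)p2;
  return (a < b) ? -1 : (a > b) ? 1 : 0;
}

int main(void) {
  int32_t v[] = {9, 3, 7, 1, 5};
  /* size = bytes per element, len = element count; NULL swap uses the default doswap */
  taosheapsort(v, sizeof(int32_t), 5, NULL, compareInt32, NULL, NULL, true);
  printf("heap root (max element): %d\n", v[0]);  /* prints 9 */
  return 0;
}
```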
+ * @param capacity the queue capacity + * @param numOfThreads the number of threads for the thread pool + * @param label the label of the queue + * @param tmrCtrl the timer controller, tmr_ctrl_t* + * @return the created queue scheduler + */ +void *taosInitSchedulerWithInfo(int capacity, int numOfThreads, const char *label, void *tmrCtrl); + +/** + * Clean up the queue scheduler instance and free the memory. + * @param queueScheduler the queue scheduler to free + */ +void taosCleanUpScheduler(void *queueScheduler); + +/** + * Schedule a new task to run, the task is described by pMsg. + * The function may be blocked if no thread is available to execute the task. + * That may happen when all threads are busy. + * @param queueScheduler the queue scheduler instance + * @param pMsg the message for the task + */ +void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg); #ifdef __cplusplus } diff --git a/src/util/inc/tscompression.h b/src/util/inc/tscompression.h index cca6d6e25054e7736ea28590fa4cb5ab788d6b07..3f2160bbf26da0ff6eb839ddb4c303d221bce7c2 100644 --- a/src/util/inc/tscompression.h +++ b/src/util/inc/tscompression.h @@ -34,6 +34,22 @@ extern "C" { #define ONE_STAGE_COMP 1 #define TWO_STAGE_COMP 2 +// +// compressed data first byte foramt +// ------ 7 bit ---- | ---- 1 bit ---- +// algorithm mode +// + +// compression data mode save first byte lower 1 bit +#define MODE_NOCOMPRESS 0 // original data +#define MODE_COMPRESS 1 // compatible old compress + +// compression algorithm save first byte higher 7 bit +#define ALGO_SZ_LOSSY 1 // SZ compress + +#define HEAD_MODE(x) x%2 +#define HEAD_ALGO(x) x/2 + extern int tsCompressINTImp(const char *const input, const int nelements, char *const output, const char type); extern int tsDecompressINTImp(const char *const input, const int nelements, char *const output, const char type); extern int tsCompressBoolImp(const char *const input, const int nelements, char *const output); @@ -46,6 +62,20 @@ extern int tsCompressDoubleImp(const char *const input, const int nelements, cha extern int tsDecompressDoubleImp(const char *const input, const int nelements, char *const output); extern int tsCompressFloatImp(const char *const input, const int nelements, char *const output); extern int tsDecompressFloatImp(const char *const input, const int nelements, char *const output); +// lossy +extern int tsCompressFloatLossyImp(const char * input, const int nelements, char *const output); +extern int tsDecompressFloatLossyImp(const char * input, int compressedSize, const int nelements, char *const output); +extern int tsCompressDoubleLossyImp(const char * input, const int nelements, char *const output); +extern int tsDecompressDoubleLossyImp(const char * input, int compressedSize, const int nelements, char *const output); + +#ifdef TD_TSZ +extern bool lossyFloat; +extern bool lossyDouble; +// init call +int tsCompressInit(); +// exit call +void tsCompressExit(); +#endif static FORCE_INLINE int tsCompressTinyint(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize) { @@ -189,56 +219,123 @@ static FORCE_INLINE int tsDecompressString(const char *const input, int compress static FORCE_INLINE int tsCompressFloat(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize) { - if (algorithm == ONE_STAGE_COMP) { - return tsCompressFloatImp(input, nelements, output); - } else if (algorithm 
== TWO_STAGE_COMP) { - int len = tsCompressFloatImp(input, nelements, buffer); - return tsCompressStringImp(buffer, len, output, outputSize); +#ifdef TD_TSZ + // lossy mode + if(lossyFloat) { + return tsCompressFloatLossyImp(input, nelements, output); + // lossless mode } else { - assert(0); - return -1; +#endif + if (algorithm == ONE_STAGE_COMP) { + return tsCompressFloatImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressFloatImp(input, nelements, buffer); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + return -1; + } +#ifdef TD_TSZ } +#endif } static FORCE_INLINE int tsDecompressFloat(const char *const input, int compressedSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize) { - if (algorithm == ONE_STAGE_COMP) { - return tsDecompressFloatImp(input, nelements, output); - } else if (algorithm == TWO_STAGE_COMP) { - if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1; - return tsDecompressFloatImp(buffer, nelements, output); +#ifdef TD_TSZ + if(HEAD_ALGO(input[0]) == ALGO_SZ_LOSSY){ + // decompress lossy + return tsDecompressFloatLossyImp(input, compressedSize, nelements, output); } else { - assert(0); - return -1; +#endif + // decompress lossless + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressFloatImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1; + return tsDecompressFloatImp(buffer, nelements, output); + } else { + assert(0); + return -1; + } +#ifdef TD_TSZ } +#endif } + static FORCE_INLINE int tsCompressDouble(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize) { - if (algorithm == ONE_STAGE_COMP) { - return tsCompressDoubleImp(input, nelements, output); - } else if (algorithm == TWO_STAGE_COMP) { - int len = tsCompressDoubleImp(input, nelements, buffer); - return tsCompressStringImp(buffer, len, output, outputSize); +#ifdef TD_TSZ + if(lossyDouble){ + // lossy mode + return tsCompressDoubleLossyImp(input, nelements, output); } else { - assert(0); - return -1; +#endif + // lossless mode + if (algorithm == ONE_STAGE_COMP) { + return tsCompressDoubleImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + int len = tsCompressDoubleImp(input, nelements, buffer); + return tsCompressStringImp(buffer, len, output, outputSize); + } else { + assert(0); + return -1; + } +#ifdef TD_TSZ } +#endif } static FORCE_INLINE int tsDecompressDouble(const char *const input, int compressedSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize) { - if (algorithm == ONE_STAGE_COMP) { - return tsDecompressDoubleImp(input, nelements, output); - } else if (algorithm == TWO_STAGE_COMP) { - if (tsDecompressStringImp(input, compressedSize, buffer, bufferSize) < 0) return -1; - return tsDecompressDoubleImp(buffer, nelements, output); + #ifdef TD_TSZ + if(HEAD_ALGO(input[0]) == ALGO_SZ_LOSSY){ + // decompress lossy + return tsDecompressDoubleLossyImp(input, compressedSize, nelements, output); } else { - assert(0); - return -1; + #endif + // decompress lossless + if (algorithm == ONE_STAGE_COMP) { + return tsDecompressDoubleImp(input, nelements, output); + } else if (algorithm == TWO_STAGE_COMP) { + if (tsDecompressStringImp(input, compressedSize, 
buffer, bufferSize) < 0) return -1; + return tsDecompressDoubleImp(buffer, nelements, output); + } else { + assert(0); + return -1; + } +#ifdef TD_TSZ } +#endif +} + +#ifdef TD_TSZ +// +// lossy float double +// +static FORCE_INLINE int tsCompressFloatLossy(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize) { + return tsCompressFloatLossyImp(input, nelements, output); +} + +static FORCE_INLINE int tsDecompressFloatLossy(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize){ + return tsDecompressFloatLossyImp(input, compressedSize, nelements, output); +} + +static FORCE_INLINE int tsCompressDoubleLossy(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, + char algorithm, char *const buffer, int bufferSize){ + return tsCompressDoubleLossyImp(input, nelements, output); +} + +static FORCE_INLINE int tsDecompressDoubleLossy(const char *const input, int compressedSize, const int nelements, char *const output, + int outputSize, char algorithm, char *const buffer, int bufferSize){ + return tsDecompressDoubleLossyImp(input, compressedSize, nelements, output); } +#endif + static FORCE_INLINE int tsCompressTimestamp(const char *const input, int inputSize, const int nelements, char *const output, int outputSize, char algorithm, char *const buffer, int bufferSize) { if (algorithm == ONE_STAGE_COMP) { diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 5b3218382fff38382a536c8ece077fd2f350adf2..d7bee9b67cad8fe91a182d76a443c04fd82be44c 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -144,6 +144,14 @@ static FORCE_INLINE SHashNode *doUpdateHashNode(SHashObj *pHashObj, SHashEntry* */ static void pushfrontNodeInEntryList(SHashEntry *pEntry, SHashNode *pNode); +/** + * Check whether the hash table is empty or not. + * + * @param pHashObj the hash table object + * @return if the hash table is empty or not + */ +static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj); + /** * Get the next element in hash table for iterator * @param pIter @@ -195,7 +203,16 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp) { } } -int32_t taosHashGetSize(const SHashObj *pHashObj) { return (int32_t)((pHashObj == NULL) ? 
0 : pHashObj->size); } +int32_t taosHashGetSize(const SHashObj *pHashObj) { + if (!pHashObj) { + return 0; + } + return (int32_t)atomic_load_64(&pHashObj->size); +} + +static FORCE_INLINE bool taosHashTableEmpty(const SHashObj *pHashObj) { + return taosHashGetSize(pHashObj) == 0; +} int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); @@ -281,7 +298,7 @@ void *taosHashGet(SHashObj *pHashObj, const void *key, size_t keyLen) { } void* taosHashGetClone(SHashObj *pHashObj, const void *key, size_t keyLen, void (*fp)(void *), void* d, size_t dsize) { - if (pHashObj->size <= 0 || keyLen == 0 || key == NULL) { + if (taosHashTableEmpty(pHashObj) || keyLen == 0 || key == NULL) { return NULL; } @@ -338,7 +355,7 @@ int32_t taosHashRemove(SHashObj *pHashObj, const void *key, size_t keyLen) { } int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t dsize) { - if (pHashObj == NULL || pHashObj->size <= 0) { + if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) { return -1; } @@ -405,7 +422,7 @@ int32_t taosHashRemoveWithData(SHashObj *pHashObj, const void *key, size_t keyLe } int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), void *param) { - if (pHashObj == NULL || pHashObj->size == 0) { + if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) { return 0; } @@ -478,7 +495,7 @@ int32_t taosHashCondTraverse(SHashObj *pHashObj, bool (*fp)(void *, void *), voi return 0; } -void taosHashEmpty(SHashObj *pHashObj) { +void taosHashClear(SHashObj *pHashObj) { if (pHashObj == NULL) { return; } @@ -517,7 +534,7 @@ void taosHashCleanup(SHashObj *pHashObj) { return; } - taosHashEmpty(pHashObj); + taosHashClear(pHashObj); tfree(pHashObj->hashList); // destroy mem block @@ -535,7 +552,7 @@ void taosHashCleanup(SHashObj *pHashObj) { // for profile only int32_t taosHashGetMaxOverflowLinkLength(const SHashObj *pHashObj) { - if (pHashObj == NULL || pHashObj->size == 0) { + if (pHashObj == NULL || taosHashTableEmpty(pHashObj)) { return 0; } diff --git a/src/util/src/talgo.c b/src/util/src/talgo.c index 278683539e3247b4b6dcd43687ac281368a7d31d..54b7e00eb7dd6f31ac8c8e6afa89790846abac5b 100644 --- a/src/util/src/talgo.c +++ b/src/util/src/talgo.c @@ -225,3 +225,89 @@ void * taosbsearch(const void *key, const void *base, size_t nmemb, size_t size, return NULL; } + +void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot) +{ + int32_t parent; + int32_t child; + char *buf; + + if (base && size > 0 && compar) { + parent = start; + child = 2 * parent + 1; + + if (swap == NULL) { + buf = calloc(1, size); + if (buf == NULL) { + return; + } + } + + if (maxroot) { + while (child <= end) { + if (child + 1 <= end && (*compar)(elePtrAt(base, size, child), elePtrAt(base, size, child + 1), parcompar) < 0) { + child++; + } + + if ((*compar)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parcompar) > 0) { + break; + } + + if (swap == NULL) { + doswap(elePtrAt(base, size, parent), elePtrAt(base, size, child), size, buf); + } else { + (*swap)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parswap); + } + + parent = child; + child = 2 * parent + 1; + } + } else { + while (child <= end) { + if (child + 1 <= end && (*compar)(elePtrAt(base, size, child), elePtrAt(base, size, child + 1), parcompar) > 0) { + child++; + } + + 
if ((*compar)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parcompar) < 0) { + break; + } + + if (swap == NULL) { + doswap(elePtrAt(base, size, parent), elePtrAt(base, size, child), size, buf); + } else { + (*swap)(elePtrAt(base, size, parent), elePtrAt(base, size, child), parswap); + } + + parent = child; + child = 2 * parent + 1; + } + } + + if (swap == NULL) { + tfree(buf); + } + } +} + +void taosheapsort(void *base, int32_t size, int32_t len, const void *parcompar, __ext_compar_fn_t compar, const void *parswap, __ext_swap_fn_t swap, bool maxroot) +{ + int32_t i; + + if (base && size > 0) { + for (i = len / 2 - 1; i >= 0; i--) { + taosheapadjust(base, size, i, len - 1, parcompar, compar, parswap, swap, maxroot); + } + } + +/* + char *buf = calloc(1, size); + + for (i = len - 1; i > 0; i--) { + doswap(elePtrAt(base, size, 0), elePtrAt(base, size, i)); + taosheapadjust(base, size, 0, i - 1, parcompar, compar, parswap, swap, maxroot); + } + + tfree(buf); +*/ +} + diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 5e7d9d14da870174964ae56627de96b2955e03f0..d0d126c1e4d7f2e7c0913585df6031b556291fc3 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -15,6 +15,7 @@ #include "os.h" #include "tarray.h" +#include "talgo.h" void* taosArrayInit(size_t size, size_t elemSize) { assert(elemSize > 0); @@ -82,6 +83,87 @@ void* taosArrayAddBatch(SArray* pArray, const void* pData, int nEles) { return dst; } +void taosArrayRemoveBatch(SArray *pArray, const int32_t* pData, int32_t numOfElems) { + assert(pArray != NULL && pData != NULL); + if (numOfElems <= 0) { + return; + } + + size_t size = taosArrayGetSize(pArray); + if (numOfElems >= size) { + taosArrayClear(pArray); + return; + } + + int32_t i = pData[0] + 1, j = 0; + while(i < size) { + if (j == numOfElems - 1) { + break; + } + + char* p = TARRAY_GET_ELEM(pArray, i); + if (i > pData[j] && i < pData[j + 1]) { + char* dst = TARRAY_GET_ELEM(pArray, i - (j + 1)); + memmove(dst, p, pArray->elemSize); + } else if (i == pData[j + 1]) { + j += 1; + } + + i += 1; + } + + assert(i == pData[numOfElems - 1] + 1); + + int32_t dstIndex = pData[numOfElems - 1] - numOfElems + 1; + int32_t srcIndex = pData[numOfElems - 1] + 1; + + char* dst = TARRAY_GET_ELEM(pArray, dstIndex); + char* src = TARRAY_GET_ELEM(pArray, srcIndex); + memmove(dst, src, pArray->elemSize * (pArray->size - numOfElems)); + + pArray->size -= numOfElems; +} + +void taosArrayRemoveDuplicate(SArray *pArray, __compar_fn_t comparFn, void (*fp)(void*)) { + assert(pArray); + + size_t size = pArray->size; + if (size <= 1) { + return; + } + + int32_t pos = 0; + for(int32_t i = 1; i < size; ++i) { + char* p1 = taosArrayGet(pArray, pos); + char* p2 = taosArrayGet(pArray, i); + + if (comparFn(p1, p2) == 0) { + // do nothing + } else { + if (pos + 1 != i) { + void* p = taosArrayGet(pArray, pos + 1); + if (fp != NULL) { + fp(p); + } + + taosArraySet(pArray, pos + 1, p2); + pos += 1; + } else { + pos += 1; + } + } + } + + if (fp != NULL) { + for(int32_t i = pos + 1; i < pArray->size; ++i) { + void* p = taosArrayGet(pArray, i); + fp(p); + } + } + + pArray->size = pos + 1; +} + void* taosArrayAddAll(SArray* pArray, const SArray* pInput) { return taosArrayAddBatch(pArray, pInput->pData, (int32_t) taosArrayGetSize(pInput)); } @@ -249,4 +331,62 @@ char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t return NULL; } return *(char**)p; -} \ No newline at end of file +} + +static int taosArrayPartition(SArray *pArray, int i, int j, __ext_compar_fn_t fn, 
const void *userData) { + void* key = taosArrayGetP(pArray, i); + while (i < j) { + while (i < j && fn(taosArrayGetP(pArray, j), key, userData) >= 0) { j--; } + if (i < j) { + void *a = taosArrayGetP(pArray, j); + taosArraySet(pArray, i, &a); + } + while (i < j && fn(taosArrayGetP(pArray, i), key, userData) <= 0) { i++;} + if (i < j) { + void *a = taosArrayGetP(pArray, i); + taosArraySet(pArray, j, &a); + } + } + taosArraySet(pArray, i, &key); + return i; +} + +static void taosArrayQuicksortHelper(SArray *pArray, int low, int high, __ext_compar_fn_t fn, const void *param) { + if (low < high) { + int idx = taosArrayPartition(pArray, low, high, fn, param); + taosArrayQuicksortHelper(pArray, low, idx - 1, fn, param); + taosArrayQuicksortHelper(pArray, idx + 1, high, fn, param); + } +} + +static void taosArrayQuickSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + if (pArray->size <= 1) { + return; + } + taosArrayQuicksortHelper(pArray, 0, (int)(taosArrayGetSize(pArray) - 1), fn, param); +} +static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + if (pArray->size <= 1) { + return; + } + for (int i = 1; i <= pArray->size - 1; ++i) { + for (int j = i; j > 0; --j) { + if (fn(taosArrayGetP(pArray, j), taosArrayGetP(pArray, j - 1), param) == -1) { + void *a = taosArrayGetP(pArray, j); + void *b = taosArrayGetP(pArray, j - 1); + taosArraySet(pArray, j - 1, &a); + taosArraySet(pArray, j, &b); + } else { + break; + } + } + } + return; + +} +// order array +void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + taosArrayGetSize(pArray) > 8 ? + taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param); +} +//TODO(yihaoDeng) add order array diff --git a/src/util/src/tbuffer.c b/src/util/src/tbuffer.c index abfa35f42cc9798d3a8a7dd7d6b874705ace88ba..c06d1e59bd537bc009b462f0b17b9cdd00321d20 100644 --- a/src/util/src/tbuffer.c +++ b/src/util/src/tbuffer.c @@ -18,6 +18,24 @@ #include "exception.h" #include "taoserror.h" +typedef union Un4B { + uint32_t ui; + float f; +} Un4B; +#if __STDC_VERSION__ >= 201112L +static_assert(sizeof(Un4B) == sizeof(uint32_t), "sizeof(Un4B) must equal to sizeof(uint32_t)"); +static_assert(sizeof(Un4B) == sizeof(float), "sizeof(Un4B) must equal to sizeof(float)"); +#endif + +typedef union Un8B { + uint64_t ull; + double d; +} Un8B; +#if __STDC_VERSION__ >= 201112L +static_assert(sizeof(Un8B) == sizeof(uint64_t), "sizeof(Un8B) must equal to sizeof(uint64_t)"); +static_assert(sizeof(Un8B) == sizeof(double), "sizeof(Un8B) must equal to sizeof(double)"); +#endif + //////////////////////////////////////////////////////////////////////////////// // reader functions @@ -175,13 +193,21 @@ uint64_t tbufReadUint64( SBufferReader* buf ) { } float tbufReadFloat( SBufferReader* buf ) { - uint32_t ret = tbufReadUint32( buf ); - return *(float*)( &ret ); + Un4B _un; + tbufReadToBuffer( buf, &_un, sizeof(_un) ); + if( buf->endian ) { + _un.ui = ntohl( _un.ui ); + } + return _un.f; } double tbufReadDouble(SBufferReader* buf) { - uint64_t ret = tbufReadUint64( buf ); - return *(double*)( &ret ); + Un8B _un; + tbufReadToBuffer( buf, &_un, sizeof(_un) ); + if( buf->endian ) { + _un.ull = htobe64( _un.ull ); + } + return _un.d; } //////////////////////////////////////////////////////////////////////////////// @@ -381,17 +407,37 @@ void tbufWriteUint64At( SBufferWriter* buf, size_t pos, uint64_t data ) { } void tbufWriteFloat( SBufferWriter* buf, float data ) { - tbufWriteUint32( buf, 
*(uint32_t*)(&data) ); + Un4B _un; + _un.f = data; + if( buf->endian ) { + _un.ui = htonl( _un.ui ); + } + tbufWrite( buf, &_un, sizeof(_un) ); } void tbufWriteFloatAt( SBufferWriter* buf, size_t pos, float data ) { - tbufWriteUint32At( buf, pos, *(uint32_t*)(&data) ); + Un4B _un; + _un.f = data; + if( buf->endian ) { + _un.ui = htonl( _un.ui ); + } + tbufWriteAt( buf, pos, &_un, sizeof(_un) ); } void tbufWriteDouble( SBufferWriter* buf, double data ) { - tbufWriteUint64( buf, *(uint64_t*)(&data) ); + Un8B _un; + _un.d = data; + if( buf->endian ) { + _un.ull = htobe64( _un.ull ); + } + tbufWrite( buf, &_un, sizeof(_un) ); } void tbufWriteDoubleAt( SBufferWriter* buf, size_t pos, double data ) { - tbufWriteUint64At( buf, pos, *(uint64_t*)(&data) ); + Un8B _un; + _un.d = data; + if( buf->endian ) { + _un.ull = htobe64( _un.ull ); + } + tbufWriteAt( buf, pos, &_un, sizeof(_un) ); } diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c index 569f9b01bda6d3a8cf5de6ba4b089e7bab49ca9f..4aa5b4378f79eaac7c31465cb90880c282670bcc 100644 --- a/src/util/src/tcache.c +++ b/src/util/src/tcache.c @@ -54,6 +54,45 @@ static FORCE_INLINE void __cache_lock_destroy(SCacheObj *pCacheObj) { #endif } +/** + * do cleanup the taos cache + * @param pCacheObj + */ +static void doCleanupDataCache(SCacheObj *pCacheObj); + +/** + * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime + * @param handle Cache object handle + */ +static void* taosCacheTimedRefresh(void *handle); + +static pthread_t cacheRefreshWorker = {0}; +static pthread_once_t cacheThreadInit = PTHREAD_ONCE_INIT; +static pthread_mutex_t guard = PTHREAD_MUTEX_INITIALIZER; +static SArray* pCacheArrayList = NULL; +static bool stopRefreshWorker = false; + +static void doInitRefreshThread(void) { + pCacheArrayList = taosArrayInit(4, POINTER_BYTES); + + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + pthread_create(&cacheRefreshWorker, &thattr, taosCacheTimedRefresh, NULL); + pthread_attr_destroy(&thattr); +} + +pthread_t doRegisterCacheObj(SCacheObj* pCacheObj) { + pthread_once(&cacheThreadInit, doInitRefreshThread); + + pthread_mutex_lock(&guard); + taosArrayPush(pCacheArrayList, &pCacheObj); + pthread_mutex_unlock(&guard); + + return cacheRefreshWorker; +} + /** * @param key key of object for hash, usually a null-terminated string * @param keyLen length of key @@ -142,19 +181,9 @@ static FORCE_INLINE void doDestroyTrashcanElem(SCacheObj* pCacheObj, STrashElem free(pElem); } -/** - * do cleanup the taos cache - * @param pCacheObj - */ -static void doCleanupDataCache(SCacheObj *pCacheObj); - -/** - * refresh cache to remove data in both hash list and trash, if any nodes' refcount == 0, every pCacheObj->refreshTime - * @param handle Cache object handle - */ -static void* taosCacheTimedRefresh(void *handle); - SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_free_fn_t fn, const char* cacheName) { + const int32_t SLEEP_DURATION = 500; //500 ms + if (refreshTimeInSeconds <= 0) { return NULL; } @@ -174,9 +203,10 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext } // set free cache node callback function - pCacheObj->freeFp = fn; + pCacheObj->freeFp = fn; pCacheObj->refreshTime = refreshTimeInSeconds * 1000; - pCacheObj->extendLifespan = extendLifespan; + pCacheObj->checkTick = pCacheObj->refreshTime / SLEEP_DURATION; + 
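The tbufWriteFloat/tbufReadFloat rewrite above routes the raw bits through the Un4B union instead of casting the float's address to uint32_t*, so the endian swap operates on a well-defined integer view of the same four bytes. A minimal round-trip sketch of that scheme (it mirrors only the float path and redefines the union locally so it compiles on its own):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl / ntohl */

typedef union Un4B {
  uint32_t ui;
  float    f;
} Un4B;

/* encode: pun the float into an integer, byte-swap only for endian-aware buffers */
static uint32_t floatToWire(float value, bool bigEndian) {
  Un4B un;
  un.f = value;
  return bigEndian ? htonl(un.ui) : un.ui;
}

/* decode: reverse the swap, then read the float view of the same bytes */
static float floatFromWire(uint32_t wire, bool bigEndian) {
  Un4B un;
  un.ui = bigEndian ? ntohl(wire) : wire;
  return un.f;
}

int main(void) {
  uint32_t wire = floatToWire(3.5f, true);
  printf("%f\n", floatFromWire(wire, true));  /* prints 3.500000 */
  return 0;
}
```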
pCacheObj->extendLifespan = extendLifespan; // the TTL after the last access if (__cache_lock_init(pCacheObj) != 0) { taosHashCleanup(pCacheObj->pHashTable); @@ -186,13 +216,7 @@ SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool ext return NULL; } - pthread_attr_t thattr; - pthread_attr_init(&thattr); - pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - - pthread_create(&pCacheObj->refreshWorker, &thattr, taosCacheTimedRefresh, pCacheObj); - - pthread_attr_destroy(&thattr); + doRegisterCacheObj(pCacheObj); return pCacheObj; } @@ -364,7 +388,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { if (pCacheObj->extendLifespan && (!inTrashcan) && (!_remove)) { atomic_store_64(&pNode->expireTime, pNode->lifespan + taosGetTimestampMs()); - uDebug("cache:%s data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime); + uDebug("cache:%s, data:%p extend expire time: %"PRId64, pCacheObj->name, pNode->data, pNode->expireTime); } if (_remove) { @@ -510,8 +534,10 @@ void taosCacheCleanup(SCacheObj *pCacheObj) { } pCacheObj->deleting = 1; - if (taosCheckPthreadValid(pCacheObj->refreshWorker)) { - pthread_join(pCacheObj->refreshWorker, NULL); + + // wait for the refresh thread quit before destroying the cache object. + while(atomic_load_8(&pCacheObj->deleting) != 0) { + taosMsleep(50); } uInfo("cache:%s will be cleaned up", pCacheObj->name); @@ -650,48 +676,79 @@ static void doCacheRefresh(SCacheObj* pCacheObj, int64_t time, __cache_free_fn_t } void* taosCacheTimedRefresh(void *handle) { - SCacheObj* pCacheObj = handle; - if (pCacheObj == NULL) { - uDebug("object is destroyed. no refresh retry"); - return NULL; - } + assert(pCacheArrayList != NULL); + uDebug("cache refresh thread starts"); - const int32_t SLEEP_DURATION = 500; //500 ms - int64_t totalTick = pCacheObj->refreshTime / SLEEP_DURATION; + setThreadName("cacheTimedRefre"); + const int32_t SLEEP_DURATION = 500; //500 ms int64_t count = 0; - while(1) { - taosMsleep(500); - // check if current cache object will be deleted every 500ms. - if (pCacheObj->deleting) { - uDebug("%s refresh threads quit", pCacheObj->name); - break; + while(1) { + taosMsleep(SLEEP_DURATION); + if (stopRefreshWorker) { + goto _end; } - if (++count < totalTick) { - continue; - } + pthread_mutex_lock(&guard); + size_t size = taosArrayGetSize(pCacheArrayList); + pthread_mutex_unlock(&guard); - // reset the count value - count = 0; - size_t elemInHash = taosHashGetSize(pCacheObj->pHashTable); - if (elemInHash + pCacheObj->numOfElemsInTrash == 0) { - continue; - } + count += 1; - uDebug("%s refresh thread timed scan", pCacheObj->name); - pCacheObj->statistics.refreshCount++; + for(int32_t i = 0; i < size; ++i) { + pthread_mutex_lock(&guard); + SCacheObj* pCacheObj = taosArrayGetP(pCacheArrayList, i); - // refresh data in hash table - if (elemInHash > 0) { - int64_t now = taosGetTimestampMs(); - doCacheRefresh(pCacheObj, now, NULL); - } + if (pCacheObj == NULL) { + uError("object is destroyed. ignore and try next"); + pthread_mutex_unlock(&guard); + continue; + } + + // check if current cache object will be deleted every 500ms. + if (pCacheObj->deleting) { + taosArrayRemove(pCacheArrayList, i); + size = taosArrayGetSize(pCacheArrayList); + + uDebug("%s is destroying, remove it from refresh list, remain cache obj:%"PRIzu, pCacheObj->name, size); + pCacheObj->deleting = 0; //reset the deleting flag to enable pCacheObj to continue releasing resources. 
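With the per-cache refresh threads gone, teardown now relies on a flag handshake: taosCacheCleanup raises `deleting` and polls every 50 ms, while the shared refresh worker drops the object from its registry and clears the flag to acknowledge. A standalone sketch of that pattern with hypothetical names (ToyCache, refreshWorker, cleanupToyCache); the real code uses TDengine's own atomic wrappers rather than C11 atomics.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef struct ToyCache {
  atomic_int deleting;            /* plays the role of SCacheObj.deleting */
} ToyCache;

static atomic_bool workerStop = false;

/* shared worker: scans its objects; on a pending delete it would drop the
 * object from the registered list, then clear the flag to acknowledge */
static void *refreshWorker(void *arg) {
  ToyCache *pCache = arg;
  while (!atomic_load(&workerStop)) {
    usleep(500 * 1000);                    /* mirrors the 500 ms scan tick */
    if (atomic_load(&pCache->deleting)) {
      atomic_store(&pCache->deleting, 0);  /* owner may now proceed */
    }
  }
  return NULL;
}

/* owner side: request teardown, then poll until the worker has acknowledged */
static void cleanupToyCache(ToyCache *pCache) {
  atomic_store(&pCache->deleting, 1);
  while (atomic_load(&pCache->deleting) != 0) {
    usleep(50 * 1000);                     /* mirrors taosMsleep(50) */
  }
  /* safe to release the cache resources now */
}

int main(void) {
  ToyCache cache = { .deleting = 0 };
  pthread_t tid;
  pthread_create(&tid, NULL, refreshWorker, &cache);
  cleanupToyCache(&cache);
  atomic_store(&workerStop, true);
  pthread_join(tid, NULL);
  printf("cache torn down after worker acknowledgement\n");
  return 0;
}
```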
+ + pthread_mutex_unlock(&guard); + continue; + } + + pthread_mutex_unlock(&guard); + + if ((count % pCacheObj->checkTick) != 0) { + continue; + } - taosTrashcanEmpty(pCacheObj, false); + size_t elemInHash = taosHashGetSize(pCacheObj->pHashTable); + if (elemInHash + pCacheObj->numOfElemsInTrash == 0) { + continue; + } + + uDebug("%s refresh thread scan", pCacheObj->name); + pCacheObj->statistics.refreshCount++; + + // refresh data in hash table + if (elemInHash > 0) { + int64_t now = taosGetTimestampMs(); + doCacheRefresh(pCacheObj, now, NULL); + } + + taosTrashcanEmpty(pCacheObj, false); + } } + _end: + taosArrayDestroy(pCacheArrayList); + + pCacheArrayList = NULL; + pthread_mutex_destroy(&guard); + + uDebug("cache refresh thread quits"); return NULL; } @@ -703,3 +760,7 @@ void taosCacheRefresh(SCacheObj *pCacheObj, __cache_free_fn_t fp) { int64_t now = taosGetTimestampMs(); doCacheRefresh(pCacheObj, now, fp); } + +void taosStopCacheRefreshWorker() { + stopRefreshWorker = false; +} \ No newline at end of file diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c index 1de6e76f7150b85dd804fbe6cbfa7cb3b1487895..1a68243028d32c2f427b855db6521b371c462783 100644 --- a/src/util/src/tcompression.c +++ b/src/util/src/tcompression.c @@ -49,9 +49,14 @@ #include "os.h" #include "lz4.h" +#ifdef TD_TSZ + #include "td_sz.h" +#endif #include "taosdef.h" #include "tscompression.h" #include "tulog.h" +#include "tglobal.h" + static const int TEST_NUMBER = 1; #define is_bigendian() ((*(char *)&TEST_NUMBER) == 0) @@ -61,6 +66,39 @@ static const int TEST_NUMBER = 1; #define ZIGZAG_ENCODE(T, v) ((u##T)((v) >> (sizeof(T) * 8 - 1))) ^ (((u##T)(v)) << 1) // zigzag encode #define ZIGZAG_DECODE(T, v) ((v) >> 1) ^ -((T)((v)&1)) // zigzag decode +#ifdef TD_TSZ +bool lossyFloat = false; +bool lossyDouble = false; + +// init call +int tsCompressInit(){ + // config + if(lossyColumns[0] == 0){ + lossyFloat = false; + lossyDouble = false; + return 0; + } + + lossyFloat = strstr(lossyColumns, "float") != NULL; + lossyDouble = strstr(lossyColumns, "double") != NULL; + + if(lossyFloat == false && lossyDouble == false) + return 0; + + tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor); + if(lossyFloat) + uInfo("lossy compression float is opened. "); + if(lossyDouble) + uInfo("lossy compression double is opened. "); + return 1; +} +// exit call +void tsCompressExit(){ + tdszExit(); +} + +#endif + /* * Compress Integer (Simple8B). */ @@ -121,7 +159,7 @@ int tsCompressINTImp(const char *const input, const int nelements, char *const o break; } // Get difference. - if (!safeInt64Add(curr_value, -prev_value)) goto _copy_and_exit; + if (!safeInt64Add(curr_value, -prev_value_tmp)) goto _copy_and_exit; int64_t diff = curr_value - prev_value_tmp; // Zigzag encode the value. @@ -410,6 +448,7 @@ int tsCompressStringImp(const char *const input, int inputSize, char *const outp int tsDecompressStringImp(const char *const input, int compressedSize, char *const output, int outputSize) { // compressedSize is the size of data after compression. 
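The lossy codecs that follow tag their output with the single-byte header laid out in tscompression.h: the low bit records whether the payload is compressed or copied verbatim, and the upper seven bits record the algorithm id (1 for SZ lossy). A small self-contained sketch of packing and unpacking that byte, with the constants redefined locally so it compiles on its own:

```c
#include <stdio.h>

#define MODE_NOCOMPRESS 0   /* payload stored as the original bytes */
#define MODE_COMPRESS   1   /* payload stored compressed */
#define ALGO_SZ_LOSSY   1   /* SZ lossy algorithm id */

/* algorithm goes into the upper 7 bits, mode into bit 0 */
static unsigned char packHead(int algo, int mode) {
  return (unsigned char)((algo << 1) | mode);
}

int main(void) {
  unsigned char head = packHead(ALGO_SZ_LOSSY, MODE_COMPRESS);
  /* HEAD_ALGO(x) is x/2 and HEAD_MODE(x) is x%2 in tscompression.h */
  printf("algo=%d mode=%d\n", head / 2, head % 2);  /* prints algo=1 mode=1 */
  return 0;
}
```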
+ if (input[0] == 1) { /* It is compressed by LZ4 algorithm */ const int decompressed_size = LZ4_decompress_safe(input + 1, output, compressedSize - 1, outputSize); @@ -886,3 +925,72 @@ int tsDecompressFloatImp(const char *const input, const int nelements, char *con return nelements * FLOAT_BYTES; } + +#ifdef TD_TSZ +// +// ---------- float double lossy ----------- +// +int tsCompressFloatLossyImp(const char * input, const int nelements, char *const output){ + // compress with sz + int compressedSize = tdszCompress(SZ_FLOAT, input, nelements, output + 1); + unsigned char algo = ALGO_SZ_LOSSY << 1; + if (compressedSize == 0 || compressedSize >= nelements*sizeof(float)){ + // compressed error or large than original + output[0] = MODE_NOCOMPRESS | algo; + memcpy(output + 1, input, nelements * sizeof(float)); + compressedSize = 1 + nelements * sizeof(float); + } else { + // compressed successfully + output[0] = MODE_COMPRESS | algo; + compressedSize += 1; + } + + return compressedSize; +} + +int tsDecompressFloatLossyImp(const char * input, int compressedSize, const int nelements, char *const output){ + int decompressedSize = 0; + if( HEAD_MODE(input[0]) == MODE_NOCOMPRESS){ + // orginal so memcpy directly + decompressedSize = nelements * sizeof(float); + memcpy(output, input + 1, decompressedSize); + + return decompressedSize; + } + + // decompressed with sz + return tdszDecompress(SZ_FLOAT, input + 1, compressedSize - 1, nelements, output); +} + +int tsCompressDoubleLossyImp(const char * input, const int nelements, char *const output){ + // compress with sz + int compressedSize = tdszCompress(SZ_DOUBLE, input, nelements, output + 1); + unsigned char algo = ALGO_SZ_LOSSY << 1; + if (compressedSize == 0 || compressedSize >= nelements*sizeof(double)) { + // compressed error or large than original + output[0] = MODE_NOCOMPRESS | algo; + memcpy(output + 1, input, nelements * sizeof(double)); + compressedSize = 1 + nelements * sizeof(double); + } else { + // compressed successfully + output[0] = MODE_COMPRESS | algo; + compressedSize += 1; + } + + return compressedSize; +} + +int tsDecompressDoubleLossyImp(const char * input, int compressedSize, const int nelements, char *const output){ + int decompressedSize = 0; + if( HEAD_MODE(input[0]) == MODE_NOCOMPRESS){ + // orginal so memcpy directly + decompressedSize = nelements * sizeof(double); + memcpy(output, input + 1, decompressedSize); + + return decompressedSize; + } + + // decompressed with sz + return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output); +} +#endif diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index c4bd57760222ac3da7d25510cc2f434fe0cf0cac..5a3dc3f9bcdee41f974e48f22b27beb2a1eb5a35 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -61,6 +61,24 @@ static void taosReadFloatConfig(SGlobalCfg *cfg, char *input_value) { } } +static void taosReadDoubleConfig(SGlobalCfg *cfg, char *input_value) { + double value = atof(input_value); + double *option = (double *)cfg->ptr; + if (value < cfg->minValue || value > cfg->maxValue) { + uError("config option:%s, input value:%s, out of range[%f, %f], use default value:%f", + cfg->option, input_value, cfg->minValue, cfg->maxValue, *option); + } else { + if (cfg->cfgStatus <= TAOS_CFG_CSTATUS_FILE) { + *option = value; + cfg->cfgStatus = TAOS_CFG_CSTATUS_FILE; + } else { + uWarn("config option:%s, input value:%s, is configured by %s, use %f", cfg->option, input_value, + tsCfgStatusStr[cfg->cfgStatus], *option); + } + } +} + + static void 
taosReadInt32Config(SGlobalCfg *cfg, char *input_value) { int32_t value = atoi(input_value); int32_t *option = (int32_t *)cfg->ptr; @@ -151,7 +169,7 @@ static bool taosReadDirectoryConfig(SGlobalCfg *cfg, char *input_value) { wordfree(&full_path); - char tmp[1025] = {0}; + char tmp[PATH_MAX] = {0}; if (realpath(option, tmp) != NULL) { strcpy(option, tmp); } @@ -262,6 +280,9 @@ static void taosReadConfigOption(const char *option, char *value, char *value2, case TAOS_CFG_VTYPE_FLOAT: taosReadFloatConfig(cfg, value); break; + case TAOS_CFG_VTYPE_DOUBLE: + taosReadDoubleConfig(cfg, value); + break; case TAOS_CFG_VTYPE_STRING: taosReadStringConfig(cfg, value); break; @@ -312,6 +333,9 @@ void taosReadGlobalLogCfg() { #ifdef _TD_POWER_ printf("configDir:%s not there, use default value: /etc/power", configDir); strcpy(configDir, "/etc/power"); + #elif (_TD_TQ_ == true) + printf("configDir:%s not there, use default value: /etc/tq", configDir); + strcpy(configDir, "/etc/tq"); #else printf("configDir:%s not there, use default value: /etc/taos", configDir); strcpy(configDir, "/etc/taos"); @@ -327,7 +351,8 @@ void taosReadGlobalLogCfg() { printf("\nconfig file:%s not found, all variables are set to default\n", fileName); return; } - + + ssize_t _bytes = 0; size_t len = 1024; line = calloc(1, len); @@ -337,7 +362,12 @@ void taosReadGlobalLogCfg() { option = value = NULL; olen = vlen = 0; - tgetline(&line, &len, fp); + _bytes = tgetline(&line, &len, fp); + if (_bytes < 0) + { + break; + } + line[len - 1] = 0; paGetToken(line, &option, &olen); @@ -373,7 +403,8 @@ bool taosReadGlobalCfg() { return false; } } - + + ssize_t _bytes = 0; size_t len = 1024; line = calloc(1, len); @@ -383,7 +414,12 @@ bool taosReadGlobalCfg() { option = value = value2 = value3 = NULL; olen = vlen = vlen2 = vlen3 = 0; - tgetline(&line, &len, fp); + _bytes = tgetline(&line, &len, fp); + if (_bytes < 0) + { + break; + } + line[len - 1] = 0; paGetToken(line, &option, &olen); @@ -448,6 +484,9 @@ void taosPrintGlobalCfg() { case TAOS_CFG_VTYPE_FLOAT: uInfo(" %s:%s%f%s", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]); break; + case TAOS_CFG_VTYPE_DOUBLE: + uInfo(" %s:%s%f%s", cfg->option, blank, *((double *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; case TAOS_CFG_VTYPE_STRING: case TAOS_CFG_VTYPE_IPSTR: case TAOS_CFG_VTYPE_DIRECTORY: diff --git a/src/util/src/terror.c b/src/util/src/terror.c index 1d37a6e9a4b740d2cfabb18bd3d2252fa6bfd034..42fc76e6c94227b6d8b5fb886239c42e19fc064d 100644 --- a/src/util/src/terror.c +++ b/src/util/src/terror.c @@ -109,11 +109,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line") +TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached") // mnode TAOS_DEFINE_ERROR(TSDB_CODE_MND_MSG_NOT_PROCESSED, "Message not processed") TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_IN_PROGRESS, "Message is progressing") -TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_NEED_REPROCESSED, "Messag need to be reprocessed") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACTION_NEED_REPROCESSED, "Message need to be reprocessed") TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_RIGHTS, "Insufficient 
privilege for operation") TAOS_DEFINE_ERROR(TSDB_CODE_MND_APP_ERROR, "Unexpected generic error in mnode") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONNECTION, "Invalid message connection") @@ -182,6 +185,14 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_ALREAY_EXIST, "Field already exists" TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_NOT_EXIST, "Field does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STABLE_NAME, "Super table does not exist") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CREATE_TABLE_MSG, "Invalid create table message") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_EXCEED_MAX_ROW_BYTES, "Exceed max row bytes") + +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_NAME, "Invalid func name") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_LEN, "Invalid func length") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_CODE, "Invalid func code") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FUNC_ALREADY_EXIST, "Func already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC, "Invalid func") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_FUNC_BUFSIZE, "Invalid func bufSize") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_NOT_SELECTED, "Database not specified or available") TAOS_DEFINE_ERROR(TSDB_CODE_MND_DB_ALREADY_EXIST, "Database already exists") @@ -207,6 +218,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, "No permission for dis TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, "Invalid message length") TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, "Action in progress") TAOS_DEFINE_ERROR(TSDB_CODE_DND_TOO_MANY_VNODES, "Too many vnode directories") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_EXITING, "Dnode is exiting") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, "Action in progress") @@ -268,6 +280,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, "Multiple retrieval of TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, "Too many time window in query") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, "Query buffer limit has reached") TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INCONSISTAN, "File inconsistance in replica") +TAOS_DEFINE_ERROR(TSDB_CODE_QRY_SYS_ERROR, "System error") // grant @@ -402,6 +415,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_TAG_VALUE_TOO_LONG, "tag value can not mor TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_NULL, "value not find") TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_OP_VALUE_TYPE, "value type should be boolean, number or string") +TAOS_DEFINE_ERROR(TSDB_CODE_HTTP_REQUEST_JSON_ERROR, "http request json error") + // odbc TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_OOM, "out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_CHAR_NOT_NUM, "convertion not a valid literal input") diff --git a/src/util/src/thashutil.c b/src/util/src/thashutil.c index ffe167977ba3fd08ac75f0b142c4fcb7db1d14a2..4a0208a3d0bf22f21b5f6a05513f435664e746af 100644 --- a/src/util/src/thashutil.c +++ b/src/util/src/thashutil.c @@ -126,15 +126,38 @@ _hash_fn_t taosGetDefaultHashFunction(int32_t type) { _hash_fn_t fn = NULL; switch(type) { case TSDB_DATA_TYPE_TIMESTAMP: - case TSDB_DATA_TYPE_BIGINT: fn = taosIntHash_64;break; - case TSDB_DATA_TYPE_BINARY: fn = MurmurHash3_32;break; - case TSDB_DATA_TYPE_NCHAR: fn = MurmurHash3_32;break; - case TSDB_DATA_TYPE_INT: fn = taosIntHash_32; break; - case TSDB_DATA_TYPE_SMALLINT: fn = taosIntHash_16; break; - case TSDB_DATA_TYPE_TINYINT: fn = taosIntHash_8; break; - case TSDB_DATA_TYPE_FLOAT: fn = taosFloatHash; break; - case TSDB_DATA_TYPE_DOUBLE: fn = taosDoubleHash; break; - default: fn = taosIntHash_32;break; + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_BIGINT: + fn = taosIntHash_64; + break; + case 
TSDB_DATA_TYPE_BINARY: + fn = MurmurHash3_32; + break; + case TSDB_DATA_TYPE_NCHAR: + fn = MurmurHash3_32; + break; + case TSDB_DATA_TYPE_UINT: + case TSDB_DATA_TYPE_INT: + fn = taosIntHash_32; + break; + case TSDB_DATA_TYPE_SMALLINT: + case TSDB_DATA_TYPE_USMALLINT: + fn = taosIntHash_16; + break; + case TSDB_DATA_TYPE_BOOL: + case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + fn = taosIntHash_8; + break; + case TSDB_DATA_TYPE_FLOAT: + fn = taosFloatHash; + break; + case TSDB_DATA_TYPE_DOUBLE: + fn = taosDoubleHash; + break; + default: + fn = taosIntHash_32; + break; } return fn; diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 7f127fc396a13f0a7796dcb4ce1dd63ce96cb951..88f57e8ac24cd207fc44f581564f45a9c33c348e 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -83,8 +83,10 @@ int64_t dbgWSize = 0; #ifdef _TD_POWER_ char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/power"; +#elif (_TD_TQ_ == true) +char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/tq"; #else -char tsLogDir[TSDB_FILENAME_LEN] = "/var/log/taos"; +char tsLogDir[PATH_MAX] = "/var/log/taos"; #endif static SLogObj tsLogObj = { .fileNum = 1 }; @@ -176,6 +178,8 @@ static void *taosThreadToOpenNewFile(void *param) { char keepName[LOG_FILE_NAME_LEN + 20]; sprintf(keepName, "%s.%d", tsLogObj.logName, tsLogObj.flag); + setThreadName("openNewFile"); + tsLogObj.flag ^= 1; tsLogObj.lines = 0; char name[LOG_FILE_NAME_LEN + 20]; @@ -685,6 +689,8 @@ static void taosWriteLog(SLogBuff *tLogBuff) { static void *taosAsyncOutputLog(void *param) { SLogBuff *tLogBuff = (SLogBuff *)param; + + setThreadName("asyncOutputLog"); while (1) { //tsem_wait(&(tLogBuff->buffNotEmpty)); diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 318a2d48609a129bcf6094455ff2a7cc8f7c0467..0bab7b7e6623ebaa5de6d6511fa1c43719372ef5 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -50,7 +50,9 @@ static void *taosNetBindUdpPort(void *sarg) { struct sockaddr_in server_addr; struct sockaddr_in clientAddr; - if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { + setThreadName("netBindUdpPort"); + + if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { uError("failed to create UDP socket since %s", strerror(errno)); return NULL; } @@ -106,13 +108,15 @@ static void *taosNetBindTcpPort(void *sarg) { struct sockaddr_in server_addr; struct sockaddr_in clientAddr; - STestInfo *pinfo = sarg; + STestInfo *pinfo = sarg; int32_t port = pinfo->port; SOCKET serverSocket; int32_t addr_len = sizeof(clientAddr); SOCKET client; char buffer[BUFFER_SIZE]; + setThreadName("netBindTcpPort"); + if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { uError("failed to create TCP socket since %s", strerror(errno)); return NULL; diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index c6d49dfb3d8242bdf697b3212fde479f01f8bcd9..b691abc5b9f6f828edcc46ec3a5989baa083f443 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -84,6 +84,8 @@ static void *taosThreadToOpenNewNote(void *param) { char name[NOTE_FILE_NAME_LEN * 2]; SNoteObj *pNote = (SNoteObj *)param; + setThreadName("openNewNote"); + pNote->flag ^= 1; pNote->lines = 0; sprintf(name, "%s.%d", pNote->name, pNote->flag); diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index 7caa1a6c37a873bae2741d2139cee808ae5a73b4..6a37f11ecef376e70f4eefbf6446150bd350cf07 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -173,10 +173,12 @@ int taosReadAllQitems(taos_queue param, taos_qall p2) { STaosQueue *queue = 
(STaosQueue *)param; STaosQall *qall = (STaosQall *)p2; int code = 0; + bool empty; pthread_mutex_lock(&queue->mutex); - if (queue->head) { + empty = queue->head == NULL; + if (!empty) { memset(qall, 0, sizeof(STaosQall)); qall->current = queue->head; qall->start = queue->head; @@ -188,11 +190,17 @@ int taosReadAllQitems(taos_queue param, taos_qall p2) { queue->tail = NULL; queue->numOfItems = 0; if (queue->qset) atomic_sub_fetch_32(&queue->qset->numOfItems, qall->numOfItems); - } + } pthread_mutex_unlock(&queue->mutex); - - return code; + + // if source queue is empty, we set destination qall to empty too. + if (empty) { + qall->current = NULL; + qall->start = NULL; + qall->numOfItems = 0; + } + return code; } int taosGetQitem(taos_qall param, int *type, void **pitem) { @@ -423,10 +431,22 @@ int taosReadAllQitemsFromQset(taos_qset param, taos_qall p2, void **phandle) { int taosGetQueueItemsNumber(taos_queue param) { STaosQueue *queue = (STaosQueue *)param; - return queue->numOfItems; + if (!queue) return 0; + + int num; + pthread_mutex_lock(&queue->mutex); + num = queue->numOfItems; + pthread_mutex_unlock(&queue->mutex); + return num; } int taosGetQsetItemsNumber(taos_qset param) { STaosQset *qset = (STaosQset *)param; - return qset->numOfItems; + if (!qset) return 0; + + int num = 0; + pthread_mutex_lock(&qset->mutex); + num = qset->numOfItems; + pthread_mutex_unlock(&qset->mutex); + return num; } diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index f014dd0fab5494bd85f197e2c79fac53359e8edf..3d3dfd989926c4acf8b32d8c7df724cb2d1ac079 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -108,39 +108,49 @@ void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) { void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl) { SSchedQueue* pSched = taosInitScheduler(queueSize, numOfThreads, label); - + if (tmrCtrl != NULL && pSched != NULL) { pSched->pTmrCtrl = tmrCtrl; taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); } - + return pSched; } -void *taosProcessSchedQueue(void *param) { +void *taosProcessSchedQueue(void *scheduler) { SSchedMsg msg; - SSchedQueue *pSched = (SSchedQueue *)param; + SSchedQueue *pSched = (SSchedQueue *)scheduler; + int ret = 0; + + setThreadName("schedQ"); while (1) { - if (tsem_wait(&pSched->fullSem) != 0) { - uError("wait %s fullSem failed(%s)", pSched->label, strerror(errno)); + if ((ret = tsem_wait(&pSched->fullSem)) != 0) { + uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno)); + exit(ret); } if (pSched->stop) { break; } - if (pthread_mutex_lock(&pSched->queueMutex) != 0) - uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) { + uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } msg = pSched->queue[pSched->fullSlot]; memset(pSched->queue + pSched->fullSlot, 0, sizeof(SSchedMsg)); pSched->fullSlot = (pSched->fullSlot + 1) % pSched->queueSize; - if (pthread_mutex_unlock(&pSched->queueMutex) != 0) - uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) { + uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } - if (tsem_post(&pSched->emptySem) != 0) - uError("post %s emptySem failed(%s)", pSched->label, strerror(errno)); + if ((ret = tsem_post(&pSched->emptySem)) != 0) { 
+ uFatal("post %s emptySem failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } if (msg.fp) (*(msg.fp))(&msg); @@ -151,30 +161,37 @@ void *taosProcessSchedQueue(void *param) { return NULL; } -int taosScheduleTask(void *qhandle, SSchedMsg *pMsg) { - SSchedQueue *pSched = (SSchedQueue *)qhandle; +void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { + SSchedQueue *pSched = (SSchedQueue *)queueScheduler; + int ret = 0; + if (pSched == NULL) { uError("sched is not ready, msg:%p is dropped", pMsg); - return 0; + return; } - if (tsem_wait(&pSched->emptySem) != 0) { - uError("wait %s emptySem failed(%s)", pSched->label, strerror(errno)); + if ((ret = tsem_wait(&pSched->emptySem)) != 0) { + uFatal("wait %s emptySem failed(%s)", pSched->label, strerror(errno)); + exit(ret); } - if (pthread_mutex_lock(&pSched->queueMutex) != 0) - uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) { + uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } pSched->queue[pSched->emptySlot] = *pMsg; pSched->emptySlot = (pSched->emptySlot + 1) % pSched->queueSize; - if (pthread_mutex_unlock(&pSched->queueMutex) != 0) - uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); - - if (tsem_post(&pSched->fullSem) != 0) - uError("post %s fullSem failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) { + uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } - return 0; + if ((ret = tsem_post(&pSched->fullSem)) != 0) { + uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } } void taosCleanUpScheduler(void *param) { diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c index 842ded19a652fdfad7cb6b35c536f760ae866618..082b454bb59abb6f43008a63078f665fd38ff32d 100644 --- a/src/util/src/tskiplist.c +++ b/src/util/src/tskiplist.c @@ -546,7 +546,6 @@ static FORCE_INLINE int32_t getSkipListNodeRandomHeight(SSkipList *pSkipList) { const uint32_t factor = 4; int32_t n = 1; - #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) while ((rand() % factor) == 0 && n <= pSkipList->maxLevel) { #else @@ -682,143 +681,3 @@ static SSkipListNode *tSkipListPutImpl(SSkipList *pSkipList, void *pData, SSkipL return pNode; } - -// static int32_t tSkipListEndParQuery(SSkipList *pSkipList, SSkipListNode *pStartNode, SSkipListKey *pEndKey, -// int32_t cond, SSkipListNode ***pRes) { -// pthread_rwlock_rdlock(&pSkipList->lock); -// SSkipListNode *p = pStartNode; -// int32_t numOfRes = 0; -// -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pEndKey->nType); -// while (p != NULL) { -// int32_t ret = filterComparFn(&p->key, pEndKey); -// if (ret > 0) { -// break; -// } -// -// if (ret < 0) { -// numOfRes++; -// p = p->pForward[0]; -// } else if (ret == 0) { -// if (cond == TSDB_RELATION_LESS_EQUAL) { -// numOfRes++; -// p = p->pForward[0]; -// } else { -// break; -// } -// } -// } -// -// (*pRes) = (SSkipListNode **)malloc(POINTER_BYTES * numOfRes); -// for (int32_t i = 0; i < numOfRes; ++i) { -// (*pRes)[i] = pStartNode; -// pStartNode = pStartNode->pForward[0]; -// } -// pthread_rwlock_unlock(&pSkipList->lock); -// -// return numOfRes; -//} -// -///* -// * maybe return the copy of SSkipListNode would be better -// */ -// int32_t tSkipListGets(SSkipList *pSkipList, SSkipListKey *pKey, SSkipListNode ***pRes) { -// (*pRes) = NULL; -// -// SSkipListNode *pNode = 
tSkipListGet(pSkipList, pKey); -// if (pNode == NULL) { -// return 0; -// } -// -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// // backward check if previous nodes are with the same value. -// SSkipListNode *pPrev = pNode->pBackward[0]; -// while ((pPrev != &pSkipList->pHead) && filterComparFn(&pPrev->key, pKey) == 0) { -// pPrev = pPrev->pBackward[0]; -// } -// -// return tSkipListEndParQuery(pSkipList, pPrev->pForward[0], &pNode->key, TSDB_RELATION_LESS_EQUAL, pRes); -//} -// -// static SSkipListNode *tSkipListParQuery(SSkipList *pSkipList, SSkipListKey *pKey, int32_t cond) { -// int32_t sLevel = pSkipList->level - 1; -// int32_t ret = -1; -// -// SSkipListNode *x = &pSkipList->pHead; -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// pthread_rwlock_rdlock(&pSkipList->lock); -// -// if (cond == TSDB_RELATION_GREATER_EQUAL || cond == TSDB_RELATION_GREATER) { -// for (int32_t i = sLevel; i >= 0; --i) { -// while (x->pForward[i] != NULL && (ret = filterComparFn(&x->pForward[i]->key, pKey)) < 0) { -// x = x->pForward[i]; -// } -// } -// -// // backward check if previous nodes are with the same value. -// if (cond == TSDB_RELATION_GREATER_EQUAL && ret == 0) { -// SSkipListNode *pNode = x->pForward[0]; -// while ((pNode->pBackward[0] != &pSkipList->pHead) && (filterComparFn(&pNode->pBackward[0]->key, pKey) == 0)) { -// pNode = pNode->pBackward[0]; -// } -// pthread_rwlock_unlock(&pSkipList->lock); -// return pNode; -// } -// -// if (ret > 0 || cond == TSDB_RELATION_GREATER_EQUAL) { -// pthread_rwlock_unlock(&pSkipList->lock); -// return x->pForward[0]; -// } else { // cond == TSDB_RELATION_GREATER && ret == 0 -// SSkipListNode *pn = x->pForward[0]; -// while (pn != NULL && filterComparFn(&pn->key, pKey) == 0) { -// pn = pn->pForward[0]; -// } -// pthread_rwlock_unlock(&pSkipList->lock); -// return pn; -// } -// } -// -// pthread_rwlock_unlock(&pSkipList->lock); -// return NULL; -//} -// -// -// static bool removeSupport(SSkipList *pSkipList, SSkipListNode **forward, SSkipListKey *pKey) { -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// if (filterComparFn(&forward[0]->pForward[0]->key, pKey) == 0) { -// SSkipListNode *p = forward[0]->pForward[0]; -// doRemove(pSkipList, p, forward); -// } else { // failed to find the node of specified value,abort -// return false; -// } -// -// // compress the minimum level of skip list -// while (pSkipList->level > 0 && SL_NODE_GET_FORWARD_POINTER(pSkipList->pHead, pSkipList->level - 1) == NULL) { -// pSkipList->level -= 1; -// } -// -// return true; -//} -// -// bool tSkipListRemove(SSkipList *pSkipList, SSkipListKey *pKey) { -// SSkipListNode *forward[MAX_SKIP_LIST_LEVEL] = {0}; -// __compar_fn_t filterComparFn = getComparFunc(pSkipList, pKey->nType); -// -// pthread_rwlock_rdlock(&pSkipList->lock); -// -// SSkipListNode *x = &pSkipList->pHead; -// for (int32_t i = pSkipList->level - 1; i >= 0; --i) { -// while (x->pForward[i] != NULL && (filterComparFn(&x->pForward[i]->key, pKey) < 0)) { -// x = x->pForward[i]; -// } -// forward[i] = x; -// } -// -// bool ret = removeSupport(pSkipList, forward, pKey); -// pthread_rwlock_unlock(&pSkipList->lock); -// -// return ret; -//} diff --git a/src/util/src/tstrbuild.c b/src/util/src/tstrbuild.c index 61a6d67952a73b8efd0c20a45214a1546f1f0258..eec21d18354a4141be92530cda1a953e5efd89a8 100644 --- a/src/util/src/tstrbuild.c +++ b/src/util/src/tstrbuild.c @@ -69,12 +69,12 @@ void taosStringBuilderAppendNull(SStringBuilder* sb) 
{ taosStringBuilderAppendSt void taosStringBuilderAppendInteger(SStringBuilder* sb, int64_t v) { char buf[64]; - size_t len = sprintf(buf, "%" PRId64, v); - taosStringBuilderAppendStringLen(sb, buf, len); + size_t len = snprintf(buf, sizeof(buf), "%" PRId64, v); + taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf))); } void taosStringBuilderAppendDouble(SStringBuilder* sb, double v) { - char buf[64]; - size_t len = sprintf(buf, "%.9lf", v); - taosStringBuilderAppendStringLen(sb, buf, len); + char buf[512]; + size_t len = snprintf(buf, sizeof(buf), "%.9lf", v); + taosStringBuilderAppendStringLen(sb, buf, MIN(len, sizeof(buf))); } diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c index 88c2a3369bb101ce6b7ae6d72fa1a7973ca732e4..c4d05b2d5a851ee4c2a8232095dd6ea5567213ab 100644 --- a/src/util/src/ttokenizer.c +++ b/src/util/src/ttokenizer.c @@ -219,8 +219,13 @@ static SKeyword keywordTable[] = { {"PARTITIONS", TK_PARTITIONS}, {"TOPIC", TK_TOPIC}, {"TOPICS", TK_TOPICS}, - {"COMPACT", TK_COMPACT}, - {"MODIFY", TK_MODIFY} + {"COMPACT", TK_COMPACT}, + {"MODIFY", TK_MODIFY}, + {"FUNCTION", TK_FUNCTION}, + {"FUNCTIONS", TK_FUNCTIONS}, + {"OUTPUTTYPE", TK_OUTPUTTYPE}, + {"AGGREGATE", TK_AGGREGATE}, + {"BUFSIZE", TK_BUFSIZE}, }; static const char isIdChar[] = { @@ -269,7 +274,7 @@ static int32_t tKeywordCode(const char* z, int n) { if (keywordHashTable == NULL) { return TK_ILLEGAL; } - + SKeyword** pKey = (SKeyword**)taosHashGet(keywordHashTable, key, n); return (pKey != NULL)? (*pKey)->type:TK_ID; } diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c index bda52936f90d07b1fde598de9ca683c8a1b8b82a..1a73991ade1ea4617fc4d3dab3904652ff46d691 100644 --- a/src/util/src/tutil.c +++ b/src/util/src/tutil.c @@ -427,13 +427,23 @@ char *taosIpStr(uint32_t ipInt) { } FORCE_INLINE float taos_align_get_float(const char* pBuf) { - float fv = 0; - *(int32_t*)(&fv) = *(int32_t*)pBuf; +#if __STDC_VERSION__ >= 201112L + static_assert(sizeof(float) == sizeof(uint32_t), "sizeof(float) must equal to sizeof(uint32_t)"); +#else + assert(sizeof(float) == sizeof(uint32_t)); +#endif + float fv = 0; + memcpy(&fv, pBuf, sizeof(fv)); // in ARM, return *((const float*)(pBuf)) may cause problem return fv; } FORCE_INLINE double taos_align_get_double(const char* pBuf) { - double dv = 0; - *(int64_t*)(&dv) = *(int64_t*)pBuf; +#if __STDC_VERSION__ >= 201112L + static_assert(sizeof(double) == sizeof(uint64_t), "sizeof(double) must equal to sizeof(uint64_t)"); +#else + assert(sizeof(double) == sizeof(uint64_t)); +#endif + double dv = 0; + memcpy(&dv, pBuf, sizeof(dv)); // in ARM, return *((const double*)(pBuf)) may cause problem return dv; } diff --git a/src/util/tests/CMakeLists.txt b/src/util/tests/CMakeLists.txt index ee99348cd9db86923f2ba06da9b3452d2dcc0347..a60c6cff2809dcc2a55f5cce3e593ef06045a975 100644 --- a/src/util/tests/CMakeLists.txt +++ b/src/util/tests/CMakeLists.txt @@ -1,15 +1,16 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) FIND_PATH(HEADER_GTEST_INCLUDE_DIR gtest.h /usr/include/gtest /usr/local/include/gtest) -FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib) +FIND_LIBRARY(LIB_GTEST_STATIC_DIR libgtest.a /usr/lib/ /usr/local/lib /usr/lib64) +FIND_LIBRARY(LIB_GTEST_SHARED_DIR libgtest.so /usr/lib/ /usr/local/lib /usr/lib64) -IF (HEADER_GTEST_INCLUDE_DIR AND LIB_GTEST_STATIC_DIR) +IF (HEADER_GTEST_INCLUDE_DIR AND (LIB_GTEST_STATIC_DIR OR LIB_GTEST_SHARED_DIR)) MESSAGE(STATUS "gTest library found, build unit test") 
INCLUDE_DIRECTORIES(${HEADER_GTEST_INCLUDE_DIR}) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) - + LIST(REMOVE_ITEM SOURCE_LIST ${CMAKE_CURRENT_SOURCE_DIR}/trefTest.c) ADD_EXECUTABLE(utilTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES(utilTest tutil common os gtest pthread gcov) diff --git a/src/util/tests/arrayTest.cpp b/src/util/tests/arrayTest.cpp new file mode 100644 index 0000000000000000000000000000000000000000..94b08ca6d7c1e9a6555407dfe7aa870233911d5a --- /dev/null +++ b/src/util/tests/arrayTest.cpp @@ -0,0 +1,50 @@ +#include +#include +#include +#include + +#include "tarray.h" + +namespace { + +static void remove_batch_test() { + SArray *pa = (SArray*) taosArrayInit(4, sizeof(int32_t)); + + for(int32_t i = 0; i < 20; ++i) { + int32_t a = i; + taosArrayPush(pa, &a); + } + + SArray* delList = (SArray*)taosArrayInit(4, sizeof(int32_t)); + taosArrayRemoveBatch(pa, (const int32_t*) TARRAY_GET_START(delList), taosArrayGetSize(delList)); + EXPECT_EQ(taosArrayGetSize(pa), 20); + + int32_t a = 5; + taosArrayPush(delList, &a); + + taosArrayRemoveBatch(pa, (const int32_t*) TARRAY_GET_START(delList), taosArrayGetSize(delList)); + EXPECT_EQ(taosArrayGetSize(pa), 19); + EXPECT_EQ(*(int*)taosArrayGet(pa, 5), 6); + + taosArrayInsert(pa, 5, &a); + EXPECT_EQ(taosArrayGetSize(pa), 20); + EXPECT_EQ(*(int*)taosArrayGet(pa, 5), 5); + + taosArrayClear(delList); + + a = 6; + taosArrayPush(delList, &a); + + a = 9; + taosArrayPush(delList, &a); + + a = 14; + taosArrayPush(delList, &a); + taosArrayRemoveBatch(pa, (const int32_t*) TARRAY_GET_START(delList), taosArrayGetSize(delList)); + EXPECT_EQ(taosArrayGetSize(pa), 17); +} +} // namespace + +TEST(arrayTest, array_list_test) { + remove_batch_test(); +} diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp index 0a4791f6a99d9fa88280a7c504b401c3b9b696bf..970f1c23a9971af1e8dc8c6c89ad7805fce1457c 100644 --- a/src/util/tests/cacheTest.cpp +++ b/src/util/tests/cacheTest.cpp @@ -5,10 +5,6 @@ #include "taos.h" #include "tcache.h" -namespace { -int32_t tsMaxMgmtConnections = 10000; -int32_t tsMaxMeterConnections = 200; -} // test cache TEST(testCase, client_cache_test) { const int32_t REFRESH_TIME_IN_SEC = 2; @@ -43,7 +39,7 @@ TEST(testCase, client_cache_test) { sleep(3); char* d = (char*) taosCacheAcquireByKey(tscMetaCache, key3, strlen(key3)); -// assert(d == NULL); + assert(d == NULL); char key5[] = "test5"; char data5[] = "data5kkkkk"; @@ -102,7 +98,7 @@ TEST(testCase, cache_resize_test) { char key[256] = {0}; char data[1024] = "abcdefghijk"; - int32_t len = strlen(data); +// int32_t len = strlen(data); uint64_t startTime = taosGetTimestampUs(); int32_t num = 10000; diff --git a/src/util/tests/skiplistTest.cpp b/src/util/tests/skiplistTest.cpp index 2203ae8e4fc75c2aa0267055f79884f05e3a65e9..dfbe0f67167ad12cedc0239fc310614ef747b080 100644 --- a/src/util/tests/skiplistTest.cpp +++ b/src/util/tests/skiplistTest.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include "os.h" @@ -8,12 +9,14 @@ #include "tskiplist.h" #include "tutil.h" +#if 0 namespace { char* getkey(const void* data) { return (char*)(data); } void doubleSkipListTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_DOUBLE, sizeof(double), 0, false, true, getkey); + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_DOUBLE, sizeof(double), + getKeyComparFunc(TSDB_DATA_TYPE_DOUBLE), false, getkey); double doubleVal[1000] = {0}; int32_t size = 20000; @@ -25,18 +28,15 @@ void doubleSkipListTest() { doubleVal[i] = i * 0.997; } - int32_t 
level = 0; - int32_t size = 0; - - tSkipListNewNodeInfo(pSkipList, &level, &size); - auto d = (SSkipListNode*)calloc(1, size + sizeof(double) * 2); - d->level = level; +// int32_t level = 0; + size = 0; - double* key = (double*)SL_GET_NODE_KEY(pSkipList, d); - key[0] = i * 0.997; - key[1] = i * 0.997; + // tSkipListNewNodeInfo(pSkipList, &level, &size); + // auto d = (SSkipListNode*)calloc(1, size + sizeof(double) * 2); + // d->level = level; - tSkipListPut(pSkipList, d); + double key = 0.997; + tSkipListPut(pSkipList, &key); } printf("the first level of skip list is:\n"); @@ -70,7 +70,8 @@ void doubleSkipListTest() { } void randKeyTest() { - SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), 0, false, true, getkey); + SSkipList* pSkipList = tSkipListCreate(10, TSDB_DATA_TYPE_INT, sizeof(int32_t), getKeyComparFunc(TSDB_DATA_TYPE_INT), + false, getkey); int32_t size = 200000; srand(time(NULL)); @@ -375,3 +376,5 @@ TEST(testCase, skiplist_test) { free(pKeys);*/ } + +#endif \ No newline at end of file diff --git a/src/util/tests/trefTest.c b/src/util/tests/trefTest.c index 454860410b4dd0c3fdf65c4b7fd7950a1a7d4446..fe3dcab201de3f5b5068c997f3759a5b8397b5b7 100644 --- a/src/util/tests/trefTest.c +++ b/src/util/tests/trefTest.c @@ -14,18 +14,18 @@ typedef struct { int refNum; int steps; int rsetId; - int64_t rid; + int64_t*rid; void **p; } SRefSpace; void iterateRefs(int rsetId) { int count = 0; - void *p = taosIterateRef(rsetId, NULL); + void *p = taosIterateRef(rsetId, 0); while (p) { // process P count++; - p = taosIterateRef(rsetId, p); + p = taosIterateRef(rsetId, (int64_t) p); } printf(" %d ", count); @@ -34,7 +34,8 @@ void iterateRefs(int rsetId) { void *addRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - int64_t rid; + + setThreadName("addRef"); for (int i=0; i < pSpace->steps; ++i) { printf("a"); @@ -51,8 +52,9 @@ void *addRef(void *param) { void *removeRef(void *param) { SRefSpace *pSpace = (SRefSpace *)param; - int id; - int64_t rid; + int id, code; + + setThreadName("removeRef"); for (int i=0; i < pSpace->steps; ++i) { printf("d"); @@ -71,16 +73,17 @@ void *removeRef(void *param) { void *acquireRelease(void *param) { SRefSpace *pSpace = (SRefSpace *)param; int id; - int64_t rid; + + setThreadName("acquireRelease"); for (int i=0; i < pSpace->steps; ++i) { printf("a"); id = random() % pSpace->refNum; - void *p = taosAcquireRef(pSpace->rsetId, pSpace->p[id]); + void *p = taosAcquireRef(pSpace->rsetId, (int64_t) pSpace->p[id]); if (p) { usleep(id % 5 + 1); - taosReleaseRef(pSpace->rsetId, pSpace->p[id]); + taosReleaseRef(pSpace->rsetId, (int64_t) pSpace->p[id]); } } @@ -94,6 +97,8 @@ void myfree(void *p) { void *openRefSpace(void *param) { SRefSpace *pSpace = (SRefSpace *)param; + setThreadName("openRefSpace"); + printf("c"); pSpace->rsetId = taosOpenRef(50, myfree); @@ -103,6 +108,7 @@ void *openRefSpace(void *param) { } pSpace->p = (void **) calloc(sizeof(void *), pSpace->refNum); + pSpace->rid = calloc(pSpace->refNum, sizeof(int64_t)); pthread_attr_t thattr; pthread_attr_init(&thattr); diff --git a/src/vnode/CMakeLists.txt b/src/vnode/CMakeLists.txt index 3fefbea05ba763dfa856dd52c195d36ce70ccd91..6238f43d32ad2ed973f522aca3bb5dfca9101435 100644 --- a/src/vnode/CMakeLists.txt +++ b/src/vnode/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc) diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h 
index d770a38e371c9920c438d810f699ab399be15833..ef05cf4a4063625d8e2810503e541fd32a7f8f62 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -41,6 +41,8 @@ typedef struct { int32_t queuedWMsg; int32_t queuedRMsg; int32_t flowctrlLevel; + int8_t preClose; // drop and close switch + int8_t reserved[3]; int64_t sequence; // for topic int8_t status; int8_t role; diff --git a/src/vnode/src/vnodeBackup.c b/src/vnode/src/vnodeBackup.c index a0a975be2bcfbb2c945a72adecbf47f9cf404b40..801af42e0e6869944ec60169b0662131be787cba 100644 --- a/src/vnode/src/vnodeBackup.c +++ b/src/vnode/src/vnodeBackup.c @@ -61,6 +61,8 @@ static void vnodeProcessBackupMsg(SVBackupMsg *pMsg) { } static void *vnodeBackupFunc(void *param) { + setThreadName("vnodeBackup"); + while (1) { SVBackupMsg *pMsg = NULL; if (taosReadQitemFromQset(tsVBackupQset, NULL, (void **)&pMsg, NULL) == 0) { diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index 074f0f681faaa89ccf7a14da0da43774a76d647b..f826c1aecd336a0eedeb3f02df0a7acc61895bb2 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -47,9 +47,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { return terrno; } - char rootDir[TSDB_FILENAME_LEN] = {0}; - sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId); - char vnodeDir[TSDB_FILENAME_LEN] = "\0"; snprintf(vnodeDir, TSDB_FILENAME_LEN, "/vnode/vnode%d", pVnodeCfg->cfg.vgId); if (tfsMkdir(vnodeDir) < 0) { @@ -63,23 +60,6 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { return code; } - // STsdbCfg tsdbCfg = {0}; - // tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId; - // tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize; - // tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks; - // tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile; - // tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep; - // tsdbCfg.keep1 = pVnodeCfg->cfg.daysToKeep1; - // tsdbCfg.keep2 = pVnodeCfg->cfg.daysToKeep2; - // tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock; - // tsdbCfg.maxRowsPerFileBlock = pVnodeCfg->cfg.maxRowsPerFileBlock; - // tsdbCfg.precision = pVnodeCfg->cfg.precision; - // tsdbCfg.compression = pVnodeCfg->cfg.compression; - // tsdbCfg.update = pVnodeCfg->cfg.update; - // tsdbCfg.cacheLastRow = pVnodeCfg->cfg.cacheLastRow; - - // char tsdbDir[TSDB_FILENAME_LEN] = {0}; - // sprintf(tsdbDir, "vnode/vnode%d/tsdb", pVnodeCfg->cfg.vgId); if (tsdbCreateRepo(pVnodeCfg->cfg.vgId) < 0) { vError("vgId:%d, failed to create tsdb in vnode, reason:%s", pVnodeCfg->cfg.vgId, tstrerror(terrno)); return TSDB_CODE_VND_INIT_FAILED; @@ -93,7 +73,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { } int32_t vnodeSync(int32_t vgId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); + SVnodeObj *pVnode = vnodeAcquireNotClose(vgId); if (pVnode == NULL) { vDebug("vgId:%d, failed to sync, vnode not find", vgId); return TSDB_CODE_VND_INVALID_VGROUP_ID; @@ -116,7 +96,7 @@ int32_t vnodeSync(int32_t vgId) { int32_t vnodeDrop(int32_t vgId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); + SVnodeObj *pVnode = vnodeAcquireNotClose(vgId); if (pVnode == NULL) { vDebug("vgId:%d, failed to drop, vnode not find", vgId); return TSDB_CODE_VND_INVALID_VGROUP_ID; @@ -439,15 +419,16 @@ int32_t vnodeOpen(int32_t vgId) { } int32_t vnodeClose(int32_t vgId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); + SVnodeObj *pVnode = vnodeAcquireNotClose(vgId); if (pVnode == NULL) return 0; if (pVnode->dropped) { vnodeRelease(pVnode); return 0; } + pVnode->preClose = 1; + vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, 
pVnode); - vnodeRemoveFromHash(pVnode); vnodeRelease(pVnode); vnodeCleanUp(pVnode); diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 5a0bafe82301d568da20cd45b0daeeb37995127d..8b17d3a5f2b8871aa83d4daf81ff936773de736a 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -125,6 +125,18 @@ void vnodeRelease(void *vparam) { } } +void *vnodeAcquireNotClose(int32_t vgId) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode != NULL && pVnode->preClose == 1) { + vnodeRelease(pVnode); + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + vDebug("vgId:%d, not exist, pre closing", vgId); + return NULL; + } + + return pVnode; +} + static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) { int64_t totalStorage = 0; int64_t compStorage = 0; @@ -188,7 +200,7 @@ void vnodeBuildStatusMsg(void *param) { void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) { for (int32_t i = 0; i < numOfVnodes; ++i) { pAccess[i].vgId = htonl(pAccess[i].vgId); - SVnodeObj *pVnode = vnodeAcquire(pAccess[i].vgId); + SVnodeObj *pVnode = vnodeAcquireNotClose(pAccess[i].vgId); if (pVnode != NULL) { pVnode->accessState = pAccess[i].accessState; if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) { diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 3964975e355db3ced1c86e28611ef202dd24d9e3..6bc009209be5788146e6d3f65439a266d7ce0522 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -231,7 +231,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { if (contLen != 0) { qinfo_t pQInfo = NULL; uint64_t qId = genQueryId(); - code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, &qId); + code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo, qId); SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); pRsp->code = code; diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 4197428fec6b5d24e7791b2a5f8cb7df229cbca5..2bdfd2ead3a31d8c2cba94d93239de965d2e07dc 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -95,7 +95,7 @@ void vnodeCtrlFlow(int32_t vgId, int32_t level) { } void vnodeStartSyncFile(int32_t vgId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); + SVnodeObj *pVnode = vnodeAcquireNotClose(vgId); if (pVnode == NULL) { vError("vgId:%d, vnode not found while start filesync", vgId); return; @@ -155,7 +155,7 @@ int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rpara } int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { - SVnodeObj *pVnode = vnodeAcquire(vgId); + SVnodeObj *pVnode = vnodeAcquireNotClose(vgId); if (pVnode == NULL) { vError("vgId:%d, vnode not found while write to cache", vgId); return -1; diff --git a/src/vnode/src/vnodeWorker.c b/src/vnode/src/vnodeWorker.c index 6fb79d10feef74d88e04368c5607d39e7d70f6f2..e94c99cbea99139a21fb7fb64c729a12d3091349 100644 --- a/src/vnode/src/vnodeWorker.c +++ b/src/vnode/src/vnodeWorker.c @@ -188,6 +188,8 @@ static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) { } static void *vnodeMWorkerFunc(void *param) { + setThreadName("vnodeMWorker"); + while (1) { SVMWorkerMsg *pMsg = NULL; if (taosReadQitemFromQset(tsVMWorkerQset, NULL, (void **)&pMsg, NULL) == 0) { diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 555eda6d13eeb1dbbb83fbd89ee2672966aa8539..743398d8344b8430a71633fe2455bca4e5ae1682 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -90,7 +90,7 @@ int32_t 
vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // forward to peers, even it is WAL/FWD, it shall be called to update version in sync int32_t syncCode = 0; - bool force = (pWrite == NULL ? false : pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT); + bool force = (pWrite == NULL ? false : pWrite->walHead.msgType != TSDB_MSG_TYPE_SUBMIT); syncCode = syncForwardToPeer(pVnode->sync, pHead, pWrite, qtype, force); if (syncCode < 0) { pHead->version = 0; @@ -237,7 +237,7 @@ static SVWriteMsg *vnodeBuildVWriteMsg(SVnodeObj *pVnode, SWalHead *pHead, int32 return NULL; } - int32_t size = sizeof(SVWriteMsg) + sizeof(SWalHead) + pHead->len; + int32_t size = sizeof(SVWriteMsg) + pHead->len; SVWriteMsg *pWrite = taosAllocateQitem(size); if (pWrite == NULL) { terrno = TSDB_CODE_VND_OUT_OF_MEMORY; @@ -248,7 +248,7 @@ static SVWriteMsg *vnodeBuildVWriteMsg(SVnodeObj *pVnode, SWalHead *pHead, int32 pWrite->rpcMsg = *pRpcMsg; } - memcpy(&pWrite->pHead, pHead, sizeof(SWalHead) + pHead->len); + memcpy(&pWrite->walHead, pHead, sizeof(SWalHead) + pHead->len); pWrite->pVnode = pVnode; pWrite->qtype = qtype; @@ -286,7 +286,7 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) { } int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); - int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + int64_t queuedSize = atomic_add_fetch_64(&pVnode->queuedWMsgSize, pWrite->walHead.len); if (queued > MAX_QUEUED_MSG_NUM || queuedSize > MAX_QUEUED_MSG_SIZE) { int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; @@ -330,7 +330,7 @@ void vnodeFreeFromWQueue(void *vparam, SVWriteMsg *pWrite) { SVnodeObj *pVnode = vparam; if (pVnode) { int32_t queued = atomic_sub_fetch_32(&pVnode->queuedWMsg, 1); - int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->pHead.len); + int64_t queuedSize = atomic_sub_fetch_64(&pVnode->queuedWMsgSize, pWrite->walHead.len); vTrace("vgId:%d, msg:%p, app:%p, free from vwqueue, queued:%d size:%" PRId64, pVnode->vgId, pWrite, pWrite->rpcMsg.ahandle, queued, queuedSize); @@ -396,10 +396,13 @@ static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { } void vnodeWaitWriteCompleted(SVnodeObj *pVnode) { + int32_t extraSleep = 0; while (pVnode->queuedWMsg > 0) { vTrace("vgId:%d, queued wmsg num:%d", pVnode->vgId, pVnode->queuedWMsg); taosMsleep(10); + extraSleep = 1; } - taosMsleep(900); + if (extraSleep) + taosMsleep(900); } diff --git a/src/wal/CMakeLists.txt b/src/wal/CMakeLists.txt index a89024dab5060b1f18174f769e0d70c00ad00faf..0d9be42bd5d54ddd1fdd372511e4f98fb7d6355b 100644 --- a/src/wal/CMakeLists.txt +++ b/src/wal/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) ADD_DEFINITIONS(-DWAL_CHECKSUM_WHOLE) diff --git a/src/wal/src/walMgmt.c b/src/wal/src/walMgmt.c index 55ab9b031beaa35da49523af21c581a5aae47e3b..45f65b2c2fc9ae5412f471805a3244644e590638 100644 --- a/src/wal/src/walMgmt.c +++ b/src/wal/src/walMgmt.c @@ -36,9 +36,16 @@ static int32_t walInitObj(SWal *pWal); static void walFreeObj(void *pWal); int32_t walInit() { + int32_t code = 0; tsWal.refId = taosOpenRef(TSDB_MIN_VNODES, walFreeObj); - int32_t code = walCreateThread(); + code = pthread_mutex_init(&tsWal.mutex, NULL); + if (code) { + wError("failed to init wal mutex since %s", tstrerror(code)); + return code; + } + + code = walCreateThread(); if (code != TSDB_CODE_SUCCESS) { wError("failed to init wal module since %s", tstrerror(code)); return code; @@ -51,6 +58,7 @@ int32_t 
walInit() { void walCleanUp() { walStopThread(); taosCloseRef(tsWal.refId); + pthread_mutex_destroy(&tsWal.mutex); wInfo("wal module is cleaned up"); } @@ -183,10 +191,16 @@ static void walFsyncAll() { } static void *walThreadFunc(void *param) { + int stop = 0; + setThreadName("walThrd"); while (1) { walUpdateSeq(); walFsyncAll(); - if (tsWal.stop) break; + + pthread_mutex_lock(&tsWal.mutex); + stop = tsWal.stop; + pthread_mutex_unlock(&tsWal.mutex); + if (stop) break; } return NULL; @@ -209,7 +223,10 @@ static int32_t walCreateThread() { } static void walStopThread() { + pthread_mutex_lock(&tsWal.mutex); tsWal.stop = 1; + pthread_mutex_unlock(&tsWal.mutex); + if (taosCheckPthreadValid(tsWal.thread)) { pthread_join(tsWal.thread, NULL); } diff --git a/src/wal/test/CMakeLists.txt b/src/wal/test/CMakeLists.txt index 071ff6fdba084b7bd9a4f6f01c43eac06c774b29..c5bc4198f10d48caf2ea133c475ea99c8e7a2fd2 100644 --- a/src/wal/test/CMakeLists.txt +++ b/src/wal/test/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_LINUX) diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c index 9a52a2ca833e671fe96acf6800f15e44fba292e3..505728fbe4c4a6fbc126aa18ff6db93a28388173 100644 --- a/src/wal/test/waltest.c +++ b/src/wal/test/waltest.c @@ -19,6 +19,7 @@ #include "tglobal.h" #include "tlog.h" #include "twal.h" +#include "tfile.h" int64_t ver = 0; void *pWal = NULL; @@ -36,7 +37,7 @@ int writeToQueue(void *pVnode, void *data, int type, void *pMsg) { } int main(int argc, char *argv[]) { - char path[128] = "/home/jhtao/test/wal"; + char path[128] = "/tmp/wal"; int level = 2; int total = 5; int rows = 10000; @@ -72,9 +73,11 @@ int main(int argc, char *argv[]) { printf(" [-h help]: print out this help\n\n"); exit(0); } - } + } taosInitLog("wal.log", 100000, 10); + tfInit(); + walInit(); SWalCfg walCfg = {0}; walCfg.walLevel = level; @@ -122,13 +125,13 @@ int main(int argc, char *argv[]) { printf("index:%" PRId64 " wal:%s\n", index, name); if (code == 0) break; - - index++; } getchar(); walClose(pWal); + walCleanUp(); + tfCleanup(); return 0; } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 4e7e9a87ea6810c362bd676cd9152f61bc08e29d..e21905af3b88cd6628c5b83471ff70013dc996fc 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -3,7 +3,7 @@ # generate release version: # mkdir release; cd release; cmake -DCMAKE_BUILD_TYPE=Release .. 
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) SET(CMAKE_C_STANDARD 11) diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index 0c1f651059714e6e32c3ec7e0a74ed22e05a6f3e..c75427b5f4e568553dbcd9e2686f529a2745c029 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -110,16 +110,8 @@ pipeline { catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${WKC}/tests/examples/JDBC/JDBCDemo/ - mvn clean package assembly:single -DskipTests >/dev/null - java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/src/connector/jdbc - mvn clean package -Dmaven.test.skip=true >/dev/null - cd ${WKC}/tests/examples/JDBC/JDBCDemo/ - java --class-path=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 + mvn clean package >/dev/null + java -jar target/JdbcRestfulDemo-jar-with-dependencies.jar ''' } catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { diff --git a/tests/comparisonTest/cassandra/cassandratest/pom.xml b/tests/comparisonTest/cassandra/cassandratest/pom.xml index 8eeb5c3aa092ba360256a0e02ccdd9cead113b95..00630d93d197379e04268ef940a8e4db282d8186 100644 --- a/tests/comparisonTest/cassandra/cassandratest/pom.xml +++ b/tests/comparisonTest/cassandra/cassandratest/pom.xml @@ -75,7 +75,7 @@ junit junit - 4.11 + 4.13.1 test diff --git a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml index e0ada8b763eca8208260ebde73ce8fb192e917db..b55a136c7393c2faa857edec801a27721a1eff20 100644 --- a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml +++ b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml @@ -87,14 +87,14 @@ junit junit - 4.11 + 4.13.1 test com.google.guava guava - 29.0-jre + 30.0-jre diff --git a/tests/comparisonTest/tdengine/CMakeLists.txt b/tests/comparisonTest/tdengine/CMakeLists.txt index 36ed3efe191c9d949d6234bd61ffbbe28c3a33d2..0f389c4c0cefd10fe829d86342bc391cffe37901 100644 --- a/tests/comparisonTest/tdengine/CMakeLists.txt +++ b/tests/comparisonTest/tdengine/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) IF (TD_LINUX) diff --git a/tests/comparisonTest/tdengine/tdengineTest.c b/tests/comparisonTest/tdengine/tdengineTest.c index d1cf3a1f98d7fa600cbaf9bcadad42428a799234..0de419e036f6af1e1808492fbb9f7b41d785b8df 100644 --- a/tests/comparisonTest/tdengine/tdengineTest.c +++ b/tests/comparisonTest/tdengine/tdengineTest.c @@ -181,8 +181,8 @@ void writeDataImp(void *param) { if (lastMachineid != machineid) { lastMachineid = machineid; - sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d) values", - machineid, machineid, machinename, machinegroup); + sqlLen += sprintf(sql + sqlLen, " dev%d values", + machineid); } sqlLen += sprintf(sql + sqlLen, "(%" PRId64 ",%d,%f)", timestamp, temperature, humidity); @@ -192,7 +192,8 @@ void writeDataImp(void *param) { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { - printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos)); + printf("insert into dev%d values (%" PRId64 ",%d,%f)\n",machineid, timestamp, temperature, humidity); + printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(result)); } taos_free_result(result); @@ -210,6 +211,7 @@ void writeDataImp(void 
*param) { result = taos_query(taos, sql); code = taos_errno(result); if (code != 0) { + // printf("insert into dev%d using devices tags(%d,'%s',%d) values (%" PRId64 ",%d,%f)",machineid, machineid, machinename, machinegroup, timestamp, temperature, humidity); printf("thread:%d error:%d reason:%s\n", pThread->threadId, code, taos_errstr(taos)); } taos_free_result(result); @@ -246,7 +248,7 @@ void writeData() { taos_free_result(result); result = taos_query(taos, - "create table if not exists db.devices(ts timestamp, temperature int, humidity float) " + "create stable if not exists db.devices(ts timestamp, temperature int, humidity float) " "tags(devid int, devname binary(16), devgroup int)"); code = taos_errno(result); if (code != 0) { @@ -254,6 +256,77 @@ void writeData() { } taos_free_result(result); + //create tables before insert the data + result = taos_query(taos, "use db"); + code = taos_errno(result); + if (code != 0) { + taos_error(result, taos); + } + taos_free_result(result); + + char *sql = calloc(1, 8*1024*1024); + int sqlLen = 0; + int lastMachineid = 0; + int counter = 0; + int totalRecords = 0; + for (int i = 0; i < arguments.filesNum; i++) { + char fileName[300]; + sprintf(fileName, "%s/testdata%d.csv", arguments.dataDir, i); + + FILE *fp = fopen(fileName, "r"); + if (fp == NULL) { + printf("failed to open file %s\n", fileName); + exit(1); + } + printf("open file %s success\n", fileName); + + char *line = NULL; + size_t len = 0; + while (!feof(fp)) { + free(line); + line = NULL; + len = 0; + + getline(&line, &len, fp); + if (line == NULL) break; + + if (strlen(line) < 10) continue; + + int machineid; + char machinename[16]; + int machinegroup; + int64_t timestamp; + int temperature; + float humidity; + sscanf(line, "%d%s%d%" PRId64 "%d%f", &machineid, machinename, &machinegroup, ×tamp, &temperature, &humidity); + + if (counter == 0) { + sqlLen = sprintf(sql, "create table if not exists"); + } + + if (lastMachineid != machineid) { + lastMachineid = machineid; + sqlLen += sprintf(sql + sqlLen, " dev%d using devices tags(%d,'%s',%d)", machineid, machineid, machinename, machinegroup); + } + counter++; + + if (counter >= arguments.rowsPerRequest) { + result = taos_query(taos, sql); + code = taos_errno(result); + if (code != 0) { + printf("create table error:%d reason:%s\n", code, taos_errstr(result)); + } + taos_free_result(result); + + totalRecords += counter; + counter = 0; + lastMachineid = -1; + sqlLen = 0; + } + } + fclose(fp); + } + int64_t st = getTimeStampMs(); int a = arguments.filesNum / arguments.clients; @@ -379,5 +452,4 @@ void readData() { } free(threads); -} - +} \ No newline at end of file diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 66e866a2d3753b574ba6a4b180fc5740dd4aeef6..ca8cd24030c7dbfc23dba3caef99c051f8416dcf 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -9,11 +9,15 @@ SNAPSHOT jar + + src/main/resources/assembly + + com.taosdata.jdbc taos-jdbcdriver - 2.0.22 + 2.0.30 @@ -22,20 +26,60 @@ org.apache.maven.plugins maven-assembly-plugin - 3.1.0 - - - - com.taosdata.example.JDBCDemo - - - - jar-with-dependencies - - + 3.3.0 + + + JdbcDemo + + JdbcDemo + + + com.taosdata.example.JdbcDemo + + + + jar-with-dependencies + + + package + + single + + + + + JdbcRestfulDemo + + JdbcRestfulDemo + + + com.taosdata.example.JdbcRestfulDemo + + + + jar-with-dependencies + + + package + + single + + + - make-assembly + SubscribeDemo + + SubscribeDemo + + + 
com.taosdata.example.SubscribeDemo + + + + jar-with-dependencies + + package single diff --git a/tests/examples/JDBC/JDBCDemo/readme.md b/tests/examples/JDBC/JDBCDemo/readme.md index 94844420859a2cc56842e95e4ac8c664c8ffa2e7..da638a0bcc485cb3d73f75b59348ec260cc871d2 100644 --- a/tests/examples/JDBC/JDBCDemo/readme.md +++ b/tests/examples/JDBC/JDBCDemo/readme.md @@ -11,12 +11,12 @@ Download the tdengine package on our website: ``https://www.taosdata.com/cn/all- ## Run jdbcDemo using mvn plugin run command: ``` -mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JDBCDemo" +mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcDemo" ``` and run with your customed args ``` -mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JDBCDemo" -Dexec.args="-host [HOSTNAME]" +mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcDemo" -Dexec.args="-host [HOSTNAME]" ``` ## Compile the Demo Code and Run It diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java similarity index 98% rename from tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java rename to tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java index da865b3ffde0fbd6af1b39e52a99ca5f190abda6..f256668dc6a3cd8ce7a2626be3d37a354919f955 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcDemo.java @@ -3,7 +3,7 @@ package com.taosdata.example; import java.sql.*; import java.util.Properties; -public class JDBCDemo { +public class JdbcDemo { private static String host; private static final String dbName = "test"; private static final String tbName = "weather"; @@ -17,7 +17,7 @@ public class JDBCDemo { if (host == null) { printHelp(); } - JDBCDemo demo = new JDBCDemo(); + JdbcDemo demo = new JdbcDemo(); demo.init(); demo.createDatabase(); demo.useDatabase(); diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java index eedb0ba166da50f21577296ad6fa1cd3f838ac32..5bf980f6d84e53438573812aa9f07d8d463f08c3 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java @@ -4,7 +4,7 @@ import java.sql.*; import java.util.Properties; public class JdbcRestfulDemo { - private static final String host = "master"; + private static final String host = "127.0.0.1"; public static void main(String[] args) { try { diff --git a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml index 64a91b951bafe65e2f685fb57091221760fb99f9..eac3dec0a92a4c8aa519cd426b9c8d3895047be6 100644 --- a/tests/examples/JDBC/SpringJdbcTemplate/pom.xml +++ b/tests/examples/JDBC/SpringJdbcTemplate/pom.xml @@ -40,7 +40,7 @@ junit junit - 4.13 + 4.13.1 test diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml index 84467003f905c454bb285088e56bcd26465a3e5e..34518900ed30f48effd47a8786233080f3e5291f 100644 --- a/tests/examples/JDBC/connectionPools/pom.xml +++ b/tests/examples/JDBC/connectionPools/pom.xml @@ -4,17 +4,22 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> 4.0.0 
+ + 1.8 + 1.8 + + com.taosdata.demo connectionPools 1.0-SNAPSHOT + com.taosdata.jdbc taos-jdbcdriver 2.0.18 - com.alibaba @@ -46,9 +51,15 @@ - log4j - log4j - 1.2.17 + org.apache.logging.log4j + log4j-core + 2.14.1 + + + + com.cloudhopper.proxool + proxool + 0.9.1 @@ -57,28 +68,49 @@ org.apache.maven.plugins maven-assembly-plugin - 3.1.0 - - - - com.taosdata.example.ConnectionPoolDemo - - - - jar-with-dependencies - - + 3.3.0 - make-assembly + ConnectionPoolDemo + + ConnectionPoolDemo + + + com.taosdata.example.ConnectionPoolDemo + + + + jar-with-dependencies + + + package + + single + + + + + ProxoolDemo + + ProxoolDemo + + + com.taosdata.example.ProxoolDemo + + + + jar-with-dependencies + + package single + - \ No newline at end of file + diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java index bd57d138b21034f45569ab3dcfc8e1ad5b39263d..96ad65aa4fc10bf81f6107a4bb2e5a4224891298 100644 --- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ConnectionPoolDemo.java @@ -5,7 +5,8 @@ import com.taosdata.example.pool.C3p0Builder; import com.taosdata.example.pool.DbcpBuilder; import com.taosdata.example.pool.DruidPoolBuilder; import com.taosdata.example.pool.HikariCpBuilder; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.sql.DataSource; import java.sql.Connection; @@ -17,7 +18,7 @@ import java.util.concurrent.TimeUnit; public class ConnectionPoolDemo { - private static Logger logger = Logger.getLogger(DruidPoolBuilder.class); + private static Logger logger = LogManager.getLogger(DruidPoolBuilder.class); private static final String dbName = "pool_test"; private static String poolType = "hikari"; diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java new file mode 100644 index 0000000000000000000000000000000000000000..632ad8c9bf69d13d137d06c1f23c964904c8e050 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/ProxoolDemo.java @@ -0,0 +1,56 @@ +package com.taosdata.example; + +import org.logicalcobwebs.proxool.ProxoolException; +import org.logicalcobwebs.proxool.configuration.JAXPConfigurator; + +import java.sql.*; + +public class ProxoolDemo { + + + public static void main(String[] args) { + + String xml = parseConfigurationXml(args); + if (xml == null) { + printHelp(); + System.exit(0); + } + + try { + JAXPConfigurator.configure(xml, false); + Class.forName("org.logicalcobwebs.proxool.ProxoolDriver"); + Connection connection = DriverManager.getConnection("proxool.ds"); + + Statement stmt = connection.createStatement(); + + ResultSet rs = stmt.executeQuery("show databases"); + ResultSetMetaData metaData = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= metaData.getColumnCount(); i++) { + System.out.print(metaData.getColumnLabel(i) + ": " + rs.getString(i)); + } + System.out.println(); + } + + stmt.close(); + + } catch (ClassNotFoundException | SQLException | ProxoolException e) { + e.printStackTrace(); + } + } + + private static String parseConfigurationXml(String[] args) { + String host = null; + for (int i = 0; i < args.length; i++) { + if 
("--xml".equalsIgnoreCase(args[i]) && i < args.length - 1) { + host = args[++i]; + } + } + return host; + } + + private static void printHelp() { + System.out.println("Usage: java -jar ProxoolDemo.jar --xml [xml]"); + } + +} diff --git a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java index da7c9a22b5b3e7f5d877a3a1489d55f439bff883..f8f1555c08f1f5847bf0a34a56341ef6d22dde50 100644 --- a/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java +++ b/tests/examples/JDBC/connectionPools/src/main/java/com/taosdata/example/common/InsertTask.java @@ -1,6 +1,7 @@ package com.taosdata.example.common; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.sql.DataSource; import java.sql.Connection; @@ -10,7 +11,7 @@ import java.util.Random; public class InsertTask implements Runnable { private final Random random = new Random(System.currentTimeMillis()); - private static final Logger logger = Logger.getLogger(InsertTask.class); + private static final Logger logger = LogManager.getLogger(InsertTask.class); private final DataSource ds; private final String dbName; diff --git a/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml b/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml new file mode 100644 index 0000000000000000000000000000000000000000..67baa1c3931aa57591af8fc306ed441328606978 --- /dev/null +++ b/tests/examples/JDBC/connectionPools/src/main/resources/proxool.xml @@ -0,0 +1,27 @@ + + + + ds + + jdbc:TAOS-RS://127.0.0.1:6041/log + + com.taosdata.jdbc.rs.RestfulDriver + + + + + + + 100 + + 100 + + 1 + + 5 + + 30000 + + select server_status() + + \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/pom.xml b/tests/examples/JDBC/mybatisplus-demo/pom.xml index a83d0a00e69bdcffff2db8b17c763959dc67365b..ad6a63e800fb73dd3c768a8aca941f70cec235b3 100644 --- a/tests/examples/JDBC/mybatisplus-demo/pom.xml +++ b/tests/examples/JDBC/mybatisplus-demo/pom.xml @@ -68,7 +68,7 @@ junit junit - 4.12 + 4.13.1 test diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml index 22c2f3b63e82a3a2cdcc3093c3c43b98ab534a4b..91b976c2ae6c76a5ae2d7b76c3b90d05e4dae57f 100644 --- a/tests/examples/JDBC/taosdemo/pom.xml +++ b/tests/examples/JDBC/taosdemo/pom.xml @@ -4,7 +4,7 @@ 4.0.0 com.taosdata taosdemo - 2.0 + 2.0.1 taosdemo jar Demo project for TDengine @@ -81,20 +81,20 @@ mysql mysql-connector-java - 5.1.47 + 8.0.16 test - log4j - log4j - 1.2.17 + org.apache.logging.log4j + log4j-core + 2.14.1 junit junit - 4.12 + 4.13.1 test diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java index c361df82b0aebb0d804b1a0982a0c1cf44ef5953..d4f5ff26886b9f90a4235d47bfd004dae9de93f6 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java @@ -8,7 +8,8 @@ import com.taosdata.taosdemo.service.SqlExecuteTask; import com.taosdata.taosdemo.service.SubTableService; import com.taosdata.taosdemo.service.SuperTableService; import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator; -import org.apache.log4j.Logger; +import 
org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.sql.DataSource; import java.io.IOException; @@ -20,7 +21,7 @@ import java.util.Map; public class TaosDemoApplication { - private static final Logger logger = Logger.getLogger(TaosDemoApplication.class); + private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class); public static void main(String[] args) throws IOException { // 读配置参数 diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java index 421a2dea1f4b49786d57b5579ca849976708791e..9340fc3fdd0ce7242d4121a5fb259af48f7ada5f 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/DatabaseMapperImpl.java @@ -1,14 +1,15 @@ package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.jdbc.core.JdbcTemplate; import javax.sql.DataSource; import java.util.Map; public class DatabaseMapperImpl implements DatabaseMapper { - private static final Logger logger = Logger.getLogger(DatabaseMapperImpl.class); + private static final Logger logger = LogManager.getLogger(DatabaseMapperImpl.class); private final JdbcTemplate jdbcTemplate; diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java index 90b0990a2bf2e9a9bd2738deec17a284c0868280..db0d43ff05a56b673ed08a522b645d3388f8e091 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapperImpl.java @@ -3,7 +3,8 @@ package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.domain.SubTableMeta; import com.taosdata.taosdemo.domain.SubTableValue; import com.taosdata.taosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.jdbc.core.JdbcTemplate; import javax.sql.DataSource; @@ -11,7 +12,7 @@ import java.util.List; public class SubTableMapperImpl implements SubTableMapper { - private static final Logger logger = Logger.getLogger(SubTableMapperImpl.class); + private static final Logger logger = LogManager.getLogger(SubTableMapperImpl.class); private final JdbcTemplate jdbcTemplate; public SubTableMapperImpl(DataSource dataSource) { diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java index efa9a1f39ea41dd1aba65ab610eae095a3164533..658a403a0ca3883831bca1ad2b6d579ef4713f7d 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapperImpl.java @@ -2,13 +2,14 @@ package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.domain.SuperTableMeta; import com.taosdata.taosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; import org.springframework.jdbc.core.JdbcTemplate; import javax.sql.DataSource; public class SuperTableMapperImpl implements SuperTableMapper { - private static final Logger logger = Logger.getLogger(SuperTableMapperImpl.class); + private static final Logger logger = LogManager.getLogger(SuperTableMapperImpl.class); private JdbcTemplate jdbcTemplate; public SuperTableMapperImpl(DataSource dataSource) { diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java index b049fbe197034ffcd8801b9c4f5e5ff8dbbcc0e0..16bc094848f6ff585e826bf3181cc4e8c03ee822 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapperImpl.java @@ -3,13 +3,14 @@ package com.taosdata.taosdemo.dao; import com.taosdata.taosdemo.domain.TableMeta; import com.taosdata.taosdemo.domain.TableValue; import com.taosdata.taosdemo.utils.SqlSpeller; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.jdbc.core.JdbcTemplate; import java.util.List; public class TableMapperImpl implements TableMapper { - private static final Logger logger = Logger.getLogger(TableMapperImpl.class); + private static final Logger logger = LogManager.getLogger(TableMapperImpl.class); private JdbcTemplate template; @Override diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java index cea98a1c5d350ed22ed5d26c72fedb212dcb7f26..b0a79dea78f429d85804bae4cb0bbec9e712ec1a 100644 --- a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java @@ -8,7 +8,8 @@ import com.taosdata.taosdemo.domain.SubTableValue; import com.taosdata.taosdemo.domain.SuperTableMeta; import com.taosdata.taosdemo.service.data.SubTableMetaGenerator; import com.taosdata.taosdemo.service.data.SubTableValueGenerator; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import javax.sql.DataSource; import java.util.ArrayList; @@ -20,7 +21,7 @@ import java.util.stream.IntStream; public class SubTableService extends AbstractService { private SubTableMapper mapper; - private static final Logger logger = Logger.getLogger(SubTableService.class); + private static final Logger logger = LogManager.getLogger(SubTableService.class); public SubTableService(DataSource datasource) { this.mapper = new SubTableMapperImpl(datasource); diff --git a/tests/examples/c/CMakeLists.txt b/tests/examples/c/CMakeLists.txt index 7f941b8c87a2c256a82aa37f9e937a41cd9a4c77..e94de3cbca574de71c8bcefc4b52173922c05a98 100644 --- a/tests/examples/c/CMakeLists.txt +++ b/tests/examples/c/CMakeLists.txt @@ -5,16 +5,18 @@ IF (TD_LINUX) AUX_SOURCE_DIRECTORY(. 
SRC) ADD_EXECUTABLE(demo apitest.c) TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ) + ADD_EXECUTABLE(sml schemaless.c) + TARGET_LINK_LIBRARIES(sml taos_static trpc tutil pthread ) ADD_EXECUTABLE(subscribe subscribe.c) TARGET_LINK_LIBRARIES(subscribe taos_static trpc tutil pthread ) ADD_EXECUTABLE(epoll epoll.c) - TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ) + TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) ENDIF () IF (TD_DARWIN) INCLUDE_DIRECTORIES(. ${TD_COMMUNITY_DIR}/src/inc ${TD_COMMUNITY_DIR}/src/client/inc ${TD_COMMUNITY_DIR}/inc) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(demo demo.c) - TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread ) + TARGET_LINK_LIBRARIES(demo taos_static trpc tutil pthread lua) ADD_EXECUTABLE(epoll epoll.c) - TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread ) + TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua) ENDIF () diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c index 0f24df0f4767fe1cdace072425768473ffcaa88f..50a62e550fda56d46fc99276a7be82128e21d046 100644 --- a/tests/examples/c/apitest.c +++ b/tests/examples/c/apitest.c @@ -12,7 +12,7 @@ static void prepare_data(TAOS* taos) { result = taos_query(taos, "drop database if exists test;"); taos_free_result(result); usleep(100000); - result = taos_query(taos, "create database test;"); + result = taos_query(taos, "create database test precision 'us';"); taos_free_result(result); usleep(100000); taos_select_db(taos, "test"); @@ -359,7 +359,7 @@ void verify_prepare(TAOS* taos) { v.v8 = (int64_t)(i * 8); v.f4 = (float)(i * 40); v.f8 = (double)(i * 80); - for (int j = 0; j < sizeof(v.bin) - 1; ++j) { + for (int j = 0; j < sizeof(v.bin); ++j) { v.bin[j] = (char)(i + '0'); } @@ -556,7 +556,7 @@ void verify_prepare2(TAOS* taos) { v.v8[i] = (int64_t)(i * 8); v.f4[i] = (float)(i * 40); v.f8[i] = (double)(i * 80); - for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) { + for (int j = 0; j < sizeof(v.bin[0]); ++j) { v.bin[i][j] = (char)(i + '0'); } strcpy(v.blob[i], "一二三四五六七八九十"); @@ -808,7 +808,7 @@ void verify_prepare3(TAOS* taos) { v.v8[i] = (int64_t)(i * 8); v.f4[i] = (float)(i * 40); v.f8[i] = (double)(i * 80); - for (int j = 0; j < sizeof(v.bin[0]) - 1; ++j) { + for (int j = 0; j < sizeof(v.bin[0]); ++j) { v.bin[i][j] = (char)(i + '0'); } strcpy(v.blob[i], "一二三四五六七八九十"); @@ -949,13 +949,77 @@ void verify_stream(TAOS* taos) { taos_close_stream(strm); } +int32_t verify_schema_less(TAOS* taos) { + TAOS_RES *result; + result = taos_query(taos, "drop database if exists test;"); + taos_free_result(result); + usleep(100000); + result = taos_query(taos, "create database test precision 'us' update 1;"); + taos_free_result(result); + usleep(100000); + + taos_select_db(taos, "test"); + result = taos_query(taos, "create stable ste(ts timestamp, f int) tags(t1 bigint)"); + taos_free_result(result); + usleep(100000); + + int code = 0; + + char* lines[] = { + "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns", + "ste,t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532ns", + "st,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000ns", + "ste,t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532ns", + "ste,t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532ns", + 
"st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns", + "stf,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns", + "stf,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000ns" + }; + + code = taos_insert_lines(taos, lines , sizeof(lines)/sizeof(char*)); + + char* lines2[] = { + "stg,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "stg,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns" + }; + code = taos_insert_lines(taos, &lines2[0], 1); + code = taos_insert_lines(taos, &lines2[1], 1); + + char* lines3[] = { + "sth,t1=4i64,t2=5f64,t4=5f64,ID=\"childtable\" c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641ms", + "sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms" + }; + code = taos_insert_lines(taos, lines3, 2); + + char* lines4[] = { + "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "dgtyqodr,t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns" + }; + code = taos_insert_lines(taos, lines4, 2); + + char* lines5[] = { + "zqlbgs,id=\"zqlbgs_39302_21680\",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64 1626006833639000000ns", + "zqlbgs,t9=f,id=\"zqlbgs_39302_21680\",t0=f,t1=127i8,t11=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t10=L\"ncharTagValue\" c10=f,c0=f,c1=127i8,c12=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\" 1626006833639000000ns" + }; + code = taos_insert_lines(taos, &lines5[0], 1); + code = taos_insert_lines(taos, &lines5[1], 1); + + + char* lines6[] = { + "st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "dgtyqodr,t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns" + }; + code = taos_insert_lines(taos, lines6, 2); + return (code); +} + int main(int argc, char *argv[]) { const char* host = "127.0.0.1"; const char* user = "root"; const char* passwd = "taosdata"; taos_options(TSDB_OPTION_TIMEZONE, "GMT-8"); - TAOS* taos = taos_connect(host, user, passwd, "", 0); if (taos == NULL) { printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos)); @@ -966,6 +1030,10 @@ int main(int argc, char *argv[]) { printf("server info: %s\n", info); info = taos_get_client_info(taos); printf("client info: %s\n", info); + + printf("************ verify shemaless *************\n"); + verify_schema_less(taos); + printf("************ verify query *************\n"); verify_query(taos); @@ -981,14 +1049,12 @@ int main(int argc, char *argv[]) { printf("************ verify prepare2 *************\n"); verify_prepare2(taos); - printf("************ verify prepare3 *************\n"); verify_prepare3(taos); - + printf("************ verify stream *************\n"); verify_stream(taos); printf("done\n"); - taos_close(taos); taos_cleanup(); } 
diff --git a/tests/examples/c/schemaless.c b/tests/examples/c/schemaless.c new file mode 100644 index 0000000000000000000000000000000000000000..3ea199c9144950526e4bbf59b9356753e2a88da6 --- /dev/null +++ b/tests/examples/c/schemaless.c @@ -0,0 +1,201 @@ +#include "taos.h" +#include "taoserror.h" +#include "os.h" + +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <unistd.h> +#include <sys/time.h> + +int numSuperTables = 8; +int numChildTables = 4; +int numRowsPerChildTable = 2048; + +void shuffle(char**lines, size_t n) +{ + if (n > 1) + { + size_t i; + for (i = 0; i < n - 1; i++) + { + size_t j = i + rand() / (RAND_MAX / (n - i) + 1); + char* t = lines[j]; + lines[j] = lines[i]; + lines[i] = t; + } + } +} + +static int64_t getTimeInUs() { + struct timeval systemTime; + gettimeofday(&systemTime, NULL); + return (int64_t)systemTime.tv_sec * 1000000L + (int64_t)systemTime.tv_usec; +} + +int main(int argc, char* argv[]) { + TAOS_RES *result; + const char* host = "127.0.0.1"; + const char* user = "root"; + const char* passwd = "taosdata"; + + taos_options(TSDB_OPTION_TIMEZONE, "GMT-8"); + TAOS* taos = taos_connect(host, user, passwd, "", 0); + if (taos == NULL) { + printf("\033[31mfailed to connect to db, reason:%s\033[0m\n", taos_errstr(taos)); + exit(1); + } + + char* info = taos_get_server_info(taos); + printf("server info: %s\n", info); + info = taos_get_client_info(taos); + printf("client info: %s\n", info); + result = taos_query(taos, "drop database if exists db;"); + taos_free_result(result); + usleep(100000); + result = taos_query(taos, "create database db precision 'ms';"); + taos_free_result(result); + usleep(100000); + + (void)taos_select_db(taos, "db"); + + time_t ct = time(0); + int64_t ts = ct * 1000; + char* lineFormat = "sta%d,t0=true,t1=127i8,t2=32767i16,t3=%di32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" %lldms"; + + char** lines = calloc(numSuperTables * numChildTables * numRowsPerChildTable, sizeof(char*)); + int l = 0; + for (int i = 0; i < numSuperTables; ++i) { + for (int j = 0; j < numChildTables; ++j) { + for (int k = 0; k < numRowsPerChildTable; ++k) { + char* line = calloc(512, 1); + snprintf(line, 512, lineFormat, i, j, ts + 10 * l); + lines[l] = line; + ++l; + } + } + } + shuffle(lines, numSuperTables * numChildTables * numRowsPerChildTable); + + printf("%s\n", "begin taos_insert_lines"); + int64_t begin = getTimeInUs(); + int32_t code = taos_insert_lines(taos, lines, numSuperTables * numChildTables * numRowsPerChildTable); + int64_t end = getTimeInUs(); + printf("code: %d, %s.
time used: %"PRId64"\n", code, tstrerror(code), end-begin); + + char* lines_000_0[] = { + "sta1,id=sta1_1,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us" + }; + + code = taos_insert_lines(taos, lines_000_0 , sizeof(lines_000_0)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_000_0 should return error\n"); + return -1; + } + + char* lines_000_1[] = { + "sta2,id=\"sta2_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=255u8,t6=32770u16,t7=2147483699u32,t8=9223372036854775899u64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001" + }; + + code = taos_insert_lines(taos, lines_000_1 , sizeof(lines_000_1)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_000_1 should return error\n"); + return -1; + } + + char* lines_000_2[] = { + "sta3,id=\"sta3_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=255u8,c6=32770u16,c7=2147483699u32,c8=9223372036854775899u64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0" + }; + + code = taos_insert_lines(taos, lines_000_2 , sizeof(lines_000_2)/sizeof(char*)); + if (0 != code) { + printf("taos_insert_lines() lines_000_2 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_001_0[] = { + "sta4,t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000us", + + }; + + code = taos_insert_lines(taos, lines_001_0 , sizeof(lines_001_0)/sizeof(char*)); + if (0 != code) { + printf("taos_insert_lines() lines_001_0 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_001_1[] = { + "sta5,id=\"sta5_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639001" + }; + + code = taos_insert_lines(taos, lines_001_1 , sizeof(lines_001_1)/sizeof(char*)); + if (0 != code) { + printf("taos_insert_lines() lines_001_1 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_001_2[] = { + "sta6,id=\"sta6_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t9=11.12345f32,t10=22.123456789f64,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" 
c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c9=11.12345f32,c10=22.123456789f64,c11=\"binaryValue\",c12=L\"ncharValue\" 0" + }; + + code = taos_insert_lines(taos, lines_001_2 , sizeof(lines_001_2)/sizeof(char*)); + if (0 != code) { + printf("taos_insert_lines() lines_001_2 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_002[] = { + "stb,id=\"stb_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639000000ns", + "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833639019us", + "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006833640ms", + "stc,id=\"stc_1\",t20=t,t21=T,t22=true,t23=True,t24=TRUE,t25=f,t26=F,t27=false,t28=False,t29=FALSE,t10=33.12345,t11=\"binaryTagValue\",t12=L\"ncharTagValue\" c20=t,c21=T,c22=true,c23=True,c24=TRUE,c25=f,c26=F,c27=false,c28=False,c29=FALSE,c10=33.12345,c11=\"binaryValue\",c12=L\"ncharValue\" 1626006834s" + }; + + code = taos_insert_lines(taos, lines_002 , sizeof(lines_002)/sizeof(char*)); + if (0 != code) { + printf("taos_insert_lines() lines_002 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + //Duplicate key check; + char* lines_003_1[] = { + "std,id=\"std_3_1\",t1=4i64,Id=\"std\",t2=true c1=true 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_1 , sizeof(lines_003_1)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_1 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_003_2[] = { + "std,id=\"std_3_2\",tag1=4i64,Tag2=true,tAg3=2,TaG2=\"dup!\" c1=true 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_2 , sizeof(lines_003_2)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_2 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_003_3[] = { + "std,id=\"std_3_3\",tag1=4i64 field1=true,Field2=2,FIElD1=\"dup!\",fIeLd4=true 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_3 , sizeof(lines_003_3)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_3 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + + char* lines_003_4[] = { + "std,id=\"std_3_4\",tag1=4i64,dupkey=4i16,tag2=T field1=true,dUpkEy=1e3f32,field2=\"1234\" 1626006834s" + }; + + code = taos_insert_lines(taos, lines_003_4 , sizeof(lines_003_4)/sizeof(char*)); + if (0 == code) { + printf("taos_insert_lines() lines_003_4 return code:%d (%s)\n", code, (char*)tstrerror(code)); + return -1; + } + return 0; +} diff --git a/tests/examples/lua/luaconnector.so b/tests/examples/lua/luaconnector.so new file mode 100755 index 0000000000000000000000000000000000000000..08bf6a6156aebe053132545193cd111fb436bc4b Binary files /dev/null and b/tests/examples/lua/luaconnector.so differ diff --git 
a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index bf62f401bffa1cd4d3189d551d02c8a3e227be4b..bcc944dadb4f8cce0e4c5ccc9aaab1dcd6a4f858 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -1,5 +1,21 @@ #!/bin/bash +branch= +if [ x$1 != x ];then + branch=$1 + echo "Testing branch: $branch" +else + echo "Please enter branch name as a parameter" + exit 1 +fi +jemalloc= +if [ x$2 != x ];then + jemalloc=jemalloc + echo "Building TDengine using jemalloc" +else + echo "Building TDengine using glibc" +fi + today=`date +"%Y%m%d"` WORK_DIR=/home/ubuntu/pxiao PERFORMANCE_TEST_REPORT=$WORK_DIR/TDengine/tests/performance-test-report-$today.log @@ -40,8 +56,8 @@ function buildTDengine { git remote update > /dev/null git reset --hard HEAD - git checkout master - REMOTE_COMMIT=`git rev-parse --short remotes/origin/master` + git checkout $branch + REMOTE_COMMIT=`git rev-parse --short remotes/origin/$branch` LOCAL_COMMIT=`git rev-parse --short @` echo " LOCAL: $LOCAL_COMMIT" @@ -53,9 +69,20 @@ function buildTDengine { git pull > /dev/null 2>&1 LOCAL_COMMIT=`git rev-parse --short @` + if [ "$jemalloc" = "jemalloc" ];then + echo "git submodule update --init --recursive" + git submodule update --init --recursive + fi + cd debug rm -rf * - cmake .. > /dev/null + + if [ "$jemalloc" = "jemalloc" ];then + echo "cmake .. -DJEMALLOC_ENABLED=true > /dev/null" + cmake .. -DJEMALLOC_ENABLED=true > /dev/null + else + cmake .. > /dev/null + fi make && make install > /dev/null fi } @@ -95,7 +122,8 @@ function sendReport { sed -i 's/\x1b\[[0-9;]*m//g' $PERFORMANCE_TEST_REPORT BODY_CONTENT=`cat $PERFORMANCE_TEST_REPORT` - echo -e "From: \nto: ${receiver}\nsubject: Query Performace Report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ + + echo -e "From: \nto: ${receiver}\nsubject: Query Performance Report ${branch} ${jemalloc} ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \ (cat - && uuencode $PERFORMANCE_TEST_REPORT performance-test-report-$today.log) | \ /usr/sbin/ssmtp "${receiver}" && echo "Report Sent!"
} diff --git a/tests/pytest/alter/alter_keep.py b/tests/pytest/alter/alter_keep.py index 72ca635ac3df60eb0caaf206220eea279420be5b..b23f364fc6f16973f8e7b5e6159f95718df9b91b 100644 --- a/tests/pytest/alter/alter_keep.py +++ b/tests/pytest/alter/alter_keep.py @@ -173,8 +173,9 @@ class TDTestCase: tdSql.checkData(0,7,'10,10,10') tdSql.error('insert into tb values (now-15d, 10)') tdSql.query('select * from tb') - tdSql.checkRows(rowNum) + tdSql.checkRows(2) + rowNum = 2 tdLog.notice('testing keep will be altered if sudden change from small to big') for i in range(30): tdSql.execute('alter database db keep 14,14,14') @@ -182,14 +183,19 @@ class TDTestCase: tdSql.execute('insert into tb values (now-15d, 10)') tdSql.query('select * from tb') rowNum += 1 - tdSql.checkRows(rowNum ) + tdSql.checkRows(rowNum) tdLog.notice('testing keep will be altered if sudden change from big to small') tdSql.execute('alter database db keep 16,16,16') tdSql.execute('alter database db keep 14,14,14') tdSql.error('insert into tb values (now-15d, 10)') tdSql.query('select * from tb') - tdSql.checkRows(rowNum) + tdSql.checkRows(2) + + tdLog.notice('testing data will show up again when keep is being changed to large value') + tdSql.execute('alter database db keep 40,40,40') + tdSql.query('select * from tb') + tdSql.checkRows(63) diff --git a/tests/pytest/crash_gen/valgrind_taos.supp b/tests/pytest/crash_gen/valgrind_taos.supp index 5f6604ba776b03eb5a00280cdf8ea2e8cde36f99..376567b7e80cbb4544d48b0e28c5d6404b6db468 100644 --- a/tests/pytest/crash_gen/valgrind_taos.supp +++ b/tests/pytest/crash_gen/valgrind_taos.supp @@ -17517,4 +17517,229 @@ fun:taosGetFqdn fun:taosCheckGlobalCfg fun:taos_init_imp +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec_mtrand + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyCode_NewWithPosOnlyArgs + fun:PyCode_New + fun:__Pyx_InitCachedConstants + fun:__pyx_pymod_exec__generator + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec_bit_generator + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec__common + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec__bounded_integers + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + 
match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec__mt19937 + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec__philox + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec__pcg64 + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__pyx_pymod_exec__sfc64 + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + obj:/usr/bin/python3.8 + fun:PyTuple_Pack + fun:__Pyx_InitCachedConstants + fun:__pyx_pymod_exec__generator + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyCode_NewWithPosOnlyArgs + fun:PyCode_New + fun:__pyx_pymod_exec_mtrand + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyCode_NewWithPosOnlyArgs + fun:PyCode_New + fun:__pyx_pymod_exec_bit_generator + fun:PyModule_ExecDef + obj:/usr/bin/python3.8 + obj:/usr/bin/python3.8 + fun:PyVectorcall_Call + fun:_PyEval_EvalFrameDefault + fun:_PyEval_EvalCodeWithName + fun:_PyFunction_Vectorcall + fun:_PyEval_EvalFrameDefault +} +{ + + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:__libc_alloc_buffer_allocate + fun:alloc_buffer_allocate + fun:__resolv_conf_allocate + fun:__resolv_conf_load + fun:__resolv_conf_get_current + fun:__res_vinit + fun:maybe_init + fun:context_get + fun:__resolv_context_get + fun:gaih_inet.constprop.7 + fun:getaddrinfo + fun:taosGetFqdn + fun:taosCheckGlobalCfg + fun:taos_init_imp } \ No newline at end of file diff --git a/tests/pytest/dbmgmt/nanoSecondCheck.py b/tests/pytest/dbmgmt/nanoSecondCheck.py new file mode 100644 index 0000000000000000000000000000000000000000..a5e9adacee53a9172a2d8990ccc4d83feb983bdd --- /dev/null +++ b/tests/pytest/dbmgmt/nanoSecondCheck.py @@ -0,0 +1,219 @@ +# ################################################################# +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. + +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao + +# ################################################################# + +# -*- coding: utf-8 -*- + +# TODO: after TD-4518 and TD-4510 is resolved, add the exception test case for these situations + +import sys +from util.log import * +from util.cases import * +from util.sql import * +import time +from datetime import datetime +import os + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute('reset query cache') + tdSql.execute('drop database if exists db') + tdSql.execute('create database db precision "ns";') + tdSql.query('show databases;') + tdSql.checkData(0,16,'ns') + tdSql.execute('use db') + + tdLog.debug('testing nanosecond support in 1st timestamp') + tdSql.execute('create table tb (ts timestamp, speed int)') + tdSql.execute('insert into tb values(\'2021-06-10 0:00:00.100000001\', 1);') + tdSql.execute('insert into tb values(1623254400150000000, 2);') + tdSql.execute('import into tb values(1623254400300000000, 3);') + tdSql.execute('import into tb values(1623254400299999999, 4);') + tdSql.execute('insert into tb values(1623254400300000001, 5);') + tdSql.execute('insert into tb values(1623254400999999999, 7);') + + + tdSql.query('select * from tb;') + tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001') + tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000') + tdSql.checkData(2,0,'2021-06-10 0:00:00.299999999') + tdSql.checkData(3,1,3) + tdSql.checkData(4,1,5) + tdSql.checkData(5,1,7) + tdSql.checkRows(6) + tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;') + tdSql.checkData(0,0,1) + tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;') + tdSql.checkData(0,0,1) + tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb where ts > 1623254400400000000;') + tdSql.checkData(0,0,1) + tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb where ts > now + 400000000b;') + tdSql.checkRows(0) + + tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';') + tdSql.checkData(0,0,6) + + tdSql.query('select count(*) from tb where ts <= 1623254400300000000;') + tdSql.checkData(0,0,4) + + tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';') + tdSql.checkRows(0) + + tdSql.query('select count(*) from tb where ts = 1623254400150000000;') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';') + tdSql.checkData(0,0,3) + + tdSql.query('select avg(speed) from tb interval(5000000000b);') + tdSql.checkRows(1) + + 
tdSql.query('select avg(speed) from tb interval(100000000b)') + tdSql.checkRows(4) + + tdSql.error('select avg(speed) from tb interval(1b);') + tdSql.error('select avg(speed) from tb interval(999b);') + + tdSql.query('select avg(speed) from tb interval(1000b);') + tdSql.checkRows(5) + + tdSql.query('select avg(speed) from tb interval(1u);') + tdSql.checkRows(5) + + tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);') + tdSql.checkRows(4) + + tdSql.query('select last(*) from tb') + tdSql.checkData(0,0, '2021-06-10 0:00:00.999999999') + tdSql.checkData(0,0, 1623254400999999999) + + tdSql.query('select first(*) from tb') + tdSql.checkData(0,0, 1623254400100000001) + tdSql.checkData(0,0, '2021-06-10 0:00:00.100000001') + + tdSql.execute('insert into tb values(now + 500000000b, 6);') + tdSql.query('select * from tb;') + tdSql.checkRows(7) + + tdLog.debug('testing nanosecond support in other timestamps') + tdSql.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);') + tdSql.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');') + tdSql.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);') + tdSql.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);') + tdSql.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);') + tdSql.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);') + tdSql.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);') + + tdSql.query('select * from tb2;') + tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001') + tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000') + tdSql.checkData(2,1,4) + tdSql.checkData(3,1,3) + tdSql.checkData(4,2,'2021-06-11 00:00:00.300000001') + tdSql.checkData(5,2,'2021-06-13 00:00:00.999999999') + tdSql.checkRows(6) + tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;') + tdSql.checkData(0,0,1) + tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\';') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000;') + tdSql.checkData(0,0,1) + tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\';') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b;') + tdSql.checkRows(0) + + tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';') + tdSql.checkData(0,0,6) + + tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000;') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\';') + tdSql.checkRows(0) + + tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001;') + tdSql.checkData(0,0,1) + + tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';') + tdSql.checkData(0,0,3) + + tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999;') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';') + tdSql.checkData(0,0,5) 
+ + tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\';') + tdSql.checkData(0,0,6) + + tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999;') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';') + tdSql.checkData(0,0,5) + + tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\';') + tdSql.checkData(0,0,6) + + tdSql.execute('insert into tb2 values(now + 500000000b, 6, now +2d);') + tdSql.query('select * from tb2;') + tdSql.checkRows(7) + + tdLog.debug('testing ill nanosecond format handling') + tdSql.execute('create table tb3 (ts timestamp, speed int);') + + tdSql.error('insert into tb3 values(16232544001500000, 2);') + tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2);') + tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\';') + tdSql.checkRows(1) + + tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2);') + tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456789\';') + tdSql.checkRows(1) + + os.system('sudo timedatectl set-ntp on') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py index ee663f89b0a6dd776c80033f177f63ec843eaa1e..43e281f43769f59c2384fed43d00868c10a05342 100644 --- a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py +++ b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py @@ -12,9 +12,6 @@ # -*- coding: utf-8 -*- from basic import * -from util.sql import tdSql - - class TDTestCase: @@ -36,4 +33,6 @@ td = TDTestCase() td.init() +## usage: python3 OneMnodeMultipleVnodesTest.py + diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 50914b0be9428ab77f479c9a18a099ecbd0a2d51..871d69790d328f3dcea9fdfdac27a6abc3bb14bd 100644 --- a/tests/pytest/dockerCluster/basic.py +++ b/tests/pytest/dockerCluster/basic.py @@ -44,7 +44,16 @@ class BuildDockerCluser: "jnidebugFlag":"135", "qdebugFlag":"135", "maxSQLLength":"1048576" - } + } + cmd = "mkdir -p %s" % self.dockerDir + self.execCmd(cmd) + + cmd = "cp *.yml %s" % self.dockerDir + self.execCmd(cmd) + + cmd = "cp Dockerfile %s" % self.dockerDir + self.execCmd(cmd) + # execute command, and return the output # ref: https://blog.csdn.net/wowocpp/article/details/80775650 @@ -81,7 +90,7 @@ class BuildDockerCluser: def removeFile(self, rootDir, index, dir): cmd = "rm -rf %s/node%d/%s/*" % (rootDir, index, dir) self.execCmd(cmd) - + def clearEnv(self): cmd = "cd %s && docker-compose down --remove-orphans" % self.dockerDir self.execCmd(cmd) @@ -108,10 +117,14 @@ class BuildDockerCluser: self.execCmd(cmd) def updateLocalhosts(self): - cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts" + cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts | sed 's: ::g'" result = self.execCmdAndGetOutput(cmd) - if result and not result.isspace(): + print(result) + if result is None or result.isspace(): + print("==========") cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + display = "echo %s" % cmd + self.execCmd(display) self.execCmd(cmd) def deploy(self): @@ -138,13 +151,13 @@ class BuildDockerCluser: if self.numOfNodes < 2 or self.numOfNodes > 10: print("the number of nodes must be between 2 and 10") exit(0) - 
self.clearEnv() - self.createDirs() self.updateLocalhosts() self.deploy() def run(self): - cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir) + cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir) + display = "echo %s" % cmd + self.execCmd(display) self.execCmd(cmd) self.getConnection() self.createDondes() diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index b9b7bbcaf6bd056329ae04b0b2fb91ff56f3ce45..bc410107e6b5fc3bd2528ff8904c3125d2370a15 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -27,6 +27,7 @@ python3 ./test.py -f insert/bug3654.py python3 ./test.py -f insert/insertDynamicColBeforeVal.py python3 ./test.py -f insert/in_function.py python3 ./test.py -f insert/modify_column.py +python3 ./test.py -f insert/line_insert.py #table python3 ./test.py -f table/alter_wal0.py @@ -43,6 +44,7 @@ python3 ./test.py -f table/del_stable.py #stable python3 ./test.py -f stable/insert.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py # tag python3 ./test.py -f tag_lite/filter.py @@ -73,8 +75,10 @@ python3 ./test.py -f tag_lite/int.py python3 ./test.py -f tag_lite/set.py python3 ./test.py -f tag_lite/smallint.py python3 ./test.py -f tag_lite/tinyint.py +python3 ./test.py -f tag_lite/timestamp.py #python3 ./test.py -f dbmgmt/database-name-boundary.py +python3 test.py -f dbmgmt/nanoSecondCheck.py python3 ./test.py -f import_merge/importBlock1HO.py python3 ./test.py -f import_merge/importBlock1HPO.py @@ -148,6 +152,7 @@ python3 ./test.py -f import_merge/importCSV.py #======================p2-start=============== # tools python3 test.py -f tools/taosdumpTest.py +python3 test.py -f tools/taosdumpTest2.py python3 test.py -f tools/taosdemoTest.py python3 test.py -f tools/taosdemoTestWithoutMetric.py @@ -158,7 +163,7 @@ python3 test.py -f tools/taosdemoTestSampleData.py python3 test.py -f tools/taosdemoTestInterlace.py python3 test.py -f tools/taosdemoTestQuery.py - +python3 test.py -f tools/taosdumpTestNanoSupport.py # update python3 ./test.py -f update/allow_update.py @@ -190,6 +195,10 @@ python3 ./test.py -f perfbenchmark/bug3433.py #python3 ./test.py -f perfbenchmark/bug3589.py python3 ./test.py -f perfbenchmark/taosdemoInsert.py +#taosdemo +python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py + #query python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py @@ -233,7 +242,12 @@ python3 ./test.py -f query/queryTscomputWithNow.py python3 ./test.py -f query/computeErrorinWhere.py python3 ./test.py -f query/queryTsisNull.py python3 ./test.py -f query/subqueryFilter.py - +python3 ./test.py -f query/nestedQuery/queryInterval.py +python3 ./test.py -f query/queryStateWindow.py +python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py +python3 ./test.py -f query/nestquery_last_row.py +python3 ./test.py -f query/queryCnameDisplay.py +python3 test.py -f query/nestedQuery/queryWithSpread.py #stream python3 ./test.py -f stream/metric_1.py @@ -320,13 +334,16 @@ python3 ./test.py -f query/last_row_cache.py python3 ./test.py -f account/account_create.py python3 ./test.py -f alter/alter_table.py python3 ./test.py -f query/queryGroupbySort.py +python3 ./test.py -f functions/queryTestCases.py python3 ./test.py -f functions/function_stateWindow.py python3 ./test.py -f functions/function_derivative.py +python3 ./test.py -f 
functions/function_irate.py python3 ./test.py -f insert/unsignedInt.py python3 ./test.py -f insert/unsignedBigint.py python3 ./test.py -f insert/unsignedSmallint.py python3 ./test.py -f insert/unsignedTinyint.py +python3 ./test.py -f insert/insertFromCSV.py python3 ./test.py -f query/filterAllUnsignedIntTypes.py python3 ./test.py -f tag_lite/unsignedInt.py @@ -336,16 +353,21 @@ python3 ./test.py -f tag_lite/unsignedTinyint.py python3 ./test.py -f functions/function_percentile2.py python3 ./test.py -f insert/boundary2.py +python3 ./test.py -f insert/insert_locking.py python3 ./test.py -f alter/alter_debugFlag.py python3 ./test.py -f query/queryBetweenAnd.py python3 ./test.py -f tag_lite/alter_tag.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py -python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py + +python3 test.py -f tools/taosdemoAllTest/TD-4985/query-limit-offset.py +python3 test.py -f tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py +python3 test.py -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py python3 ./test.py -f tag_lite/drop_auto_create.py python3 test.py -f insert/insert_before_use_db.py python3 test.py -f alter/alter_keep.py python3 test.py -f alter/alter_cacheLastRow.py python3 ./test.py -f query/querySession.py python3 test.py -f alter/alter_create_exception.py + +python3 ./test.py -f insert/flushwhiledrop.py #======================p4-end=============== diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py index 4ddb9c309bfa37897bc1eb7870dc4fd1e9cd6293..9d60129672b5516d83e696f67b5c8d5a17630afd 100644 --- a/tests/pytest/functions/function_derivative.py +++ b/tests/pytest/functions/function_derivative.py @@ -128,7 +128,14 @@ class TDTestCase: def run(self): tdSql.prepare() - self.insertAndCheckData() + self.insertAndCheckData() + + tdSql.execute("create table st(ts timestamp, c1 int, c2 int) tags(id int)") + tdSql.execute("insert into dev1(ts, c1) using st tags(1) values(now, 1)") + + tdSql.error("select derivative(c1, 10s, 0) from (select c1 from st)") + tdSql.query("select diff(c1) from (select derivative(c1, 1s, 0) c1 from dev1)") + tdSql.checkRows(0) def stop(self): tdSql.close() diff --git a/tests/pytest/functions/function_irate.py b/tests/pytest/functions/function_irate.py new file mode 100644 index 0000000000000000000000000000000000000000..4e876cc270b98e3bb5186e07d30b6cf60c5ee298 --- /dev/null +++ b/tests/pytest/functions/function_irate.py @@ -0,0 +1,261 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 100 + self.ts = 1537146000000 + self.ts1 = 1537146000000000 + self.ts2 = 1597146000000 + + + def run(self): + # db precison ms + tdSql.prepare() + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20), tag1 int)''') + tdSql.execute("create table test1 using test tags('beijing', 10)") + tdSql.execute("create table test2 using test tags('tianjing', 20)") + tdSql.execute("create table test3 using test tags('shanghai', 20)") + tdSql.execute("create table gtest1 (ts timestamp, col1 float)") + tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)") + + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + tdSql.execute("insert into test2 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts2 + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + tdSql.execute("insert into test3 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts2 + i*1000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.execute("insert into gtest1 values(1537146000000,0);") + tdSql.execute("insert into gtest1 values(1537146001100,1.2);") + tdSql.execute("insert into gtest2 values(1537146001001,1);") + tdSql.execute("insert into gtest2 values(1537146001101,2);") + tdSql.execute("insert into gtest3 values(1537146001101,2);") + tdSql.execute("insert into gtest4(ts) values(1537146001101);") + tdSql.execute("insert into gtest5 values(1537146001002,4);") + tdSql.execute("insert into gtest5 values(1537146002202,4);") + tdSql.execute("insert into gtest6 values(1537146000000,5);") + tdSql.execute("insert into gtest6 values(1537146001000,2);") + tdSql.execute("insert into gtest7 values(1537146001000,1);") + tdSql.execute("insert into gtest7 values(1537146008000,2);") + tdSql.execute("insert into gtest7 values(1537146009000,6);") + tdSql.execute("insert into gtest7 values(1537146012000,3);") + tdSql.execute("insert into gtest7 values(1537146015000,3);") + tdSql.execute("insert into gtest7 values(1537146017000,1);") + tdSql.execute("insert into gtest7 
values(1537146019000,3);") + tdSql.execute("insert into gtest8 values(1537146000002,4);") + tdSql.execute("insert into gtest8 values(1537146002202,4);") + + # irate verifacation --child table'query + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from test1 interval(10s);") + tdSql.checkData(0, 1, 1) + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col2) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col3) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col4) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col5) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col6) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col11) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col12) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col13) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col14) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col2) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col2) from test1;") + tdSql.checkData(0, 0, 1) + + # irate verifacation --super table'query + tdSql.query("select irate(col1) from test group by tbname,loc,tag1;") + tdSql.checkData(0, 0, 1) + tdSql.checkData(1, 1, "test2") + tdSql.checkData(2, 2, "shanghai") + + # add function testcase of twa: query from super table + tdSql.query("select twa(col1) from test group by tbname,loc,tag1;") + tdSql.checkData(0, 0, 50.5) + tdSql.checkData(1, 1, "test2") + tdSql.checkData(2, 2, "shanghai") + + # error: function of irate and twa has invalid operation + tdSql.error("select irate(col7) from test group by tbname,loc,tag1;") + tdSql.error("select irate(col7) from test group by tbname;") + tdSql.error("select irate(col1) from test group by loc,tbname,tag1;") + # tdSql.error("select irate(col1) from test group by tbname,col7;") + tdSql.error("select irate(col1) from test group by col7,tbname;") + tdSql.error("select twa(col7) from test group by tbname,loc,tag1;") + tdSql.error("select twa(col7) from test group by tbname;") + tdSql.error("select twa(col1) from test group by loc,tbname,tag1;") + # tdSql.error("select twa(col1) from test group by tbname,col7;") + tdSql.error("select twa(col1) from test group by col7,tbname;") + + + # general table'query + tdSql.query("select irate(col1) from gtest1;") + tdSql.checkData(0, 0, 1.2/1.1) + tdSql.query("select irate(col1) from gtest2;") + tdSql.checkData(0, 0, 10) + tdSql.query("select irate(col1) from gtest3;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest4;") + tdSql.checkRows(0) + tdSql.query("select irate(col1) from gtest5;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest6;") + tdSql.checkData(0, 0, 2) + tdSql.query("select irate(col1) from gtest7;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;") + tdSql.checkData(1, 1, 4) + tdSql.checkData(2, 1, 0) + tdSql.checkData(3, 1, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;") + tdSql.checkData(1, 1, 0) + tdSql.checkData(2, 1, 4) + tdSql.checkData(3, 1, 0) + + #error + tdSql.error("select irate(col1) from test") + tdSql.error("select irate(ts) from test1") + tdSql.error("select irate(col7) from test1") + tdSql.error("select irate(col8) from test1") + tdSql.error("select 
irate(col9) from test1") + tdSql.error("select irate(loc) from test1") + tdSql.error("select irate(tag1) from test1") + + # use db1 precision us + tdSql.execute("create database db1 precision 'us' keep 3650 UPDATE 1") + tdSql.execute("use db1 ") + tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double, + col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''') + tdSql.execute("create table test1 using test tags('beijing')") + tdSql.execute("create table gtest1 (ts timestamp, col1 float)") + tdSql.execute("create table gtest2 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest3 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest4 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest5 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest6 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest7 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest8 (ts timestamp, col1 tinyint)") + tdSql.execute("create table gtest9 (ts timestamp, col1 tinyint)") + + for i in range(self.rowNum): + tdSql.execute("insert into test1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)" + % (self.ts1 + i*1000000, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1)) + + tdSql.execute("insert into gtest1 values(1537146000000000,0);") + tdSql.execute("insert into gtest1 values(1537146001100000,1.2);") + tdSql.execute("insert into gtest2 values(1537146001001000,1);") + tdSql.execute("insert into gtest2 values(1537146001101000,2);") + tdSql.execute("insert into gtest3 values(1537146001101000,2);") + tdSql.execute("insert into gtest4(ts) values(1537146001101000);") + tdSql.execute("insert into gtest5 values(1537146001002000,4);") + tdSql.execute("insert into gtest5 values(1537146002202000,4);") + tdSql.execute("insert into gtest6 values(1537146000000000,5);") + tdSql.execute("insert into gtest6 values(1537146001000000,2);") + tdSql.execute("insert into gtest7 values(1537146001000000,1);") + tdSql.execute("insert into gtest7 values(1537146008000000,2);") + tdSql.execute("insert into gtest7 values(1537146009000000,6);") + tdSql.execute("insert into gtest7 values(1537146012000000,3);") + tdSql.execute("insert into gtest7 values(1537146015000000,3);") + tdSql.execute("insert into gtest7 values(1537146017000000,1);") + tdSql.execute("insert into gtest7 values(1537146019000000,3);") + tdSql.execute("insert into gtest8 values(1537146000002000,3);") + tdSql.execute("insert into gtest8 values(1537146001003000,4);") + tdSql.execute("insert into gtest9 values(1537146000000000,4);") + tdSql.execute("insert into gtest9 values(1537146000000001,5);") + + + # irate verifacation + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from test1 interval(10s);") + tdSql.checkData(0, 1, 1) + tdSql.query("select irate(col1) from test1;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from gtest1;") + tdSql.checkData(0, 0, 1.2/1.1) + tdSql.query("select irate(col1) from gtest2;") + tdSql.checkData(0, 0, 10) + tdSql.query("select irate(col1) from gtest3;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) from gtest4;") + tdSql.checkRows(0) + tdSql.query("select irate(col1) from gtest5;") + tdSql.checkData(0, 0, 0) + tdSql.query("select irate(col1) 
from gtest6;") + tdSql.checkData(0, 0, 2) + tdSql.query("select irate(col1) from gtest7;") + tdSql.checkData(0, 0, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts asc;") + tdSql.checkData(1, 1, 4) + tdSql.checkData(2, 1, 0) + tdSql.checkData(3, 1, 1) + tdSql.query("select irate(col1) from gtest7 interval(5s) order by ts desc ;") + tdSql.checkData(1, 1, 0) + tdSql.checkData(2, 1, 4) + tdSql.checkData(3, 1, 0) + tdSql.query("select irate(col1) from gtest8;") + tdSql.checkData(0, 0, 1/1.001) + tdSql.query("select irate(col1) from gtest9;") + tdSql.checkData(0, 0, 1000000) + + #error + tdSql.error("select irate(col1) from test") + tdSql.error("select irate(ts) from test1") + tdSql.error("select irate(col7) from test1") + tdSql.error("select irate(col8) from test1") + tdSql.error("select irate(col9) from test1") + tdSql.error("select irate(loc) from test1") + tdSql.error("select irate(tag1) from test1") + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/functions/function_operations.py b/tests/pytest/functions/function_operations.py index 859cd78a3d396cf6a4fd9d08c24f6f7bd6ac324c..3e90d1d3eab8288fb9b6b0bb293250e6cf7ae2a1 100644 --- a/tests/pytest/functions/function_operations.py +++ b/tests/pytest/functions/function_operations.py @@ -16,7 +16,7 @@ import taos from util.log import * from util.cases import * from util.sql import * -import numpy as np +#import numpy as np class TDTestCase: diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py new file mode 100644 index 0000000000000000000000000000000000000000..b7480fdbd555f97f17d71b5583296d49ffcd4061 --- /dev/null +++ b/tests/pytest/functions/queryTestCases.py @@ -0,0 +1,364 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/debug/build/bin")]
+                    break
+        return buildPath
+
+    def getCfgDir(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+        else:
+            cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+        return cfgDir
+
+    def getCfgFile(self) -> str:
+        return self.getCfgDir()+"/taos.cfg"
+
+    def td3690(self):
+        tdLog.printNoPrefix("==========TD-3690==========")
+        tdSql.query("show variables")
+        tdSql.checkData(51, 1, 864000)
+
+    def td4082(self):
+        tdLog.printNoPrefix("==========TD-4082==========")
+        cfgfile = self.getCfgFile()
+        max_compressMsgSize = 100000000
+
+        tdSql.query("show variables")
+        tdSql.checkData(26, 1, -1)
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(26, 1, 100000000)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(26, 1, -1)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+    def td4097(self):
+        tdLog.printNoPrefix("==========TD-4097==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("drop database if exists db1")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("create database if not exists db1 keep 3650")
+
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+        tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+        tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+        tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+        tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+        tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+        tdLog.printNoPrefix("==========TD-4097==========")
+        # insert data, then run the show create checks
+
+        # p1: without switching to the target database
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.error("show create database ")
+        tdSql.error("show create databases db ")
+        tdSql.error("show create database db.stb1")
+        tdSql.error("show create database db0")
+        tdSql.error("show create database db db1")
+        tdSql.error("show create database db, db1")
+        tdSql.error("show create database stb1")
+        tdSql.error("show create database * ")
+
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+        tdSql.error("show create stable db.t10")
+        tdSql.error("show create stable db.stb0")
+        tdSql.error("show create stable stb1")
+        tdSql.error("show create stable ")
+        tdSql.error("show create stable *")
+        tdSql.error("show create stable db.stb1 db.stb2")
+        tdSql.error("show create stable db.stb1, db.stb2")
+
+        tdSql.query("show create table db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db.t10")
+        tdSql.checkRows(1)
+        tdSql.error("show create table db.stb0")
+        tdSql.error("show create table stb1")
+        tdSql.error("show create table ")
+        tdSql.error("show create table *")
+        tdSql.error("show create table db.stb1 db.stb2")
+        tdSql.error("show create table db.stb1, db.stb2")
+
+        # p2: after switching to the target database
+        tdSql.execute("use db")
+
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db1")
+        tdSql.checkRows(1)
+        tdSql.error("show create database ")
+        tdSql.error("show create databases db ")
+        tdSql.error("show create database db.stb1")
+        tdSql.error("show create database db0")
+        tdSql.error("show create database db db1")
+        tdSql.error("show create database db, db1")
+        tdSql.error("show create database stb1")
+        tdSql.error("show create database * ")
+
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db1.stb3")
+        tdSql.checkRows(1)
+        tdSql.error("show create stable db.t10")
+        tdSql.error("show create stable db")
+        tdSql.error("show create stable t10")
+        tdSql.error("show create stable db.stb0")
+        tdSql.error("show create stables stb1")
+        tdSql.error("show create stable ")
+        tdSql.error("show create stable *")
+        tdSql.error("show create stable db.stb1 db.stb2")
+        tdSql.error("show create stable stb1 stb2")
+        tdSql.error("show create stable db.stb1, db.stb2")
+        tdSql.error("show create stable stb1, stb2")
+
+        tdSql.query("show create table db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db.t10")
+        tdSql.checkRows(1)
+        tdSql.query("show create table t10")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db1.t30")
+        tdSql.checkRows(1)
+        tdSql.error("show create table t30")
+        tdSql.error("show create table db.stb0")
+        tdSql.error("show create table db.t0")
+        tdSql.error("show create table db")
+        tdSql.error("show create tables stb1")
+        tdSql.error("show create tables t10")
+        tdSql.error("show create table ")
+        tdSql.error("show create table *")
+        tdSql.error("show create table db.stb1 db.stb2")
+        tdSql.error("show create table db.t11 db.t10")
+        tdSql.error("show create table db.stb1, db.stb2")
+        tdSql.error("show create table db.t11, db.t10")
+        tdSql.error("show create table stb1 stb2")
+        tdSql.error("show create table t11 t10")
+        tdSql.error("show create table stb1, stb2")
+        tdSql.error("show create table t11, t10")
+
+        # p3: query again after dropping tables and databases
+        tdSql.execute("drop table if exists t11")
+
+        tdSql.error("show create table t11")
+        tdSql.error("show create table db.t11")
+        tdSql.query("show create stable stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table t10")
+        tdSql.checkRows(1)
+
+
+
tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.query("show variables") + tdSql.checkData(36, 1, 3650) + tdSql.query("show databases") + tdSql.checkData(0,7,"3650,3650,3650") + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + tdSql.checkData(0, 7, "3650,3650,36500") + tdSql.execute("drop database if exists db") + + tdSql.execute("create database if not exists db1") + tdSql.query("show databases") + tdSql.checkData(0, 7, "3650,3650,3650") + tdSql.query("show variables") + tdSql.checkData(36, 1, 3650) + + tdSql.execute("alter database db1 keep 365") + tdSql.execute("drop database if exists db1") + + + pass + + def td4724(self): + tdLog.printNoPrefix("==========TD-4724==========") + cfgfile = self.getCfgFile() + minTablesPerVnode = 5 + maxTablesPerVnode = 10 + maxVgroupsPerDb = 100 + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} " + min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} " + max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} " + try: + _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + insert_sql = "insert into " + for i in range(100): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})" + tdSql.query("show dnodes") + vnode_count = tdSql.getData(0, 2) + if vnode_count <= 1: + tdLog.exit("vnode is less 
than 2") + + tdSql.execute(insert_sql) + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + pass + + def run(self): + + # master branch + # self.td3690() + # self.td4082() + # self.td4288() + self.td4724() + + # develop branch + # self.td4097() + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py index 0d583aa2cce26fb400e4a943f49cacee96c933d6..0cbb7876c6194041a160f8fee7271f0c76d3b90c 100644 --- a/tests/pytest/insert/binary.py +++ b/tests/pytest/insert/binary.py @@ -4,6 +4,8 @@ import sys from util.log import * from util.cases import * from util.sql import * +import subprocess +import os class TDTestCase: @@ -50,6 +52,10 @@ class TDTestCase: tdLog.info('==> $data00') tdLog.info("tdSql.checkData(0, 0, '34567')") tdSql.checkData(0, 0, '34567') + tdLog.info("insert into tb values (now+4a, \"'';\")") + config_dir = subprocess.check_output(str("ps -ef |grep dnode1|grep -v grep |awk '{print $NF}'"), stderr=subprocess.STDOUT, shell=True).decode('utf-8').replace('\n', '') + result = ''.join(os.popen(r"""taos -s "insert into db.tb values (now+4a, \"'';\")" -c %s"""%(config_dir)).readlines()) + if "Query OK" not in result: tdLog.exit("err:insert '';") tdLog.info('drop database db') tdSql.execute('drop database db') tdLog.info('show databases') diff --git a/tests/pytest/insert/flushwhiledrop.py b/tests/pytest/insert/flushwhiledrop.py new file mode 100644 index 0000000000000000000000000000000000000000..3784657117ad9e5a24851a2e6ac11e11594f0c72 --- /dev/null +++ b/tests/pytest/insert/flushwhiledrop.py @@ -0,0 +1,67 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import threading +from util.log import * +from util.cases import * +from util.sql import * +from time import sleep + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.numberOfRecords = 15000 + self.ts = 1601481600000 + + def run(self): + tdSql.execute('create database test cache 1 blocks 3') + tdSql.execute('use test') + tdSql.execute('create table tb(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8))') + threads = [] + t1 = threading.Thread(target=self.insertAndFlush, args=()) + threads.append(t1) + t2 = threading.Thread(target=self.drop, args=()) + threads.append(t2) + for t in threads: + t.setDaemon(True) + t.start() + for t in threads: + t.join() + + def insertAndFlush(self): + finish = 0 + currts = self.ts + + while(finish < self.numberOfRecords): + sql = "insert into tb values" + for i in range(finish, self.numberOfRecords): + sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i) + finish = i + 1 + if (1048576 - len(sql)) < 16384: + break + tdSql.execute(sql) + + def drop(self): + sleep(30) + tdSql.execute('drop database test') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/insert/in_function.py b/tests/pytest/insert/in_function.py index d1fbfd702a857ab6d966aff46e04a32ac933c384..3f2e1a03cad0a74c665341ac04250ec8a239ad6f 100644 --- a/tests/pytest/insert/in_function.py +++ b/tests/pytest/insert/in_function.py @@ -18,7 +18,6 @@ from util.log import * from util.cases import * from util.sql import * - class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -27,6 +26,7 @@ class TDTestCase: def run(self): tdSql.prepare() # test case for https://jira.taosdata.com:18080/browse/TD-4568 + # test case for https://jira.taosdata.com:18080/browse/TD-4824 tdLog.info("=============== step1,check bool and tinyint data type") @@ -137,8 +137,28 @@ class TDTestCase: tdSql.checkData(0,1,'True') tdSql.checkData(0,2,'0') + tdLog.info("=============== step1.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_1 + where in_bool in (true,false) and in_tinyint in (0,127,-127) + and tin_bool in (true,false) and tin_tinyint in (0,127,-127) + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'True') + tdSql.checkData(0,2,'0') + tdSql.checkData(0,3,'False') + tdSql.checkData(0,4,'0') + tdSql.checkData(1,1,'False') + tdSql.checkData(1,2,'127') + tdSql.checkData(1,3,'False') + tdSql.checkData(1,4,'-127') + tdSql.checkData(2,1,'True') + tdSql.checkData(2,2,'-127') + tdSql.checkData(2,3,'True') + tdSql.checkData(2,4,'127') + - tdLog.info("=============== step1.3,drop normal table && create table") + tdLog.info("=============== step1.4,drop normal table && create table") cmd1 = 'drop table if exists normal_in_bool_tinyint_1 ;' cmd2 = 'create table normal_in_bool_tinyint_1 (ts 
timestamp,in_bool bool,in_tinyint tinyint) ; ' tdLog.info(cmd1) @@ -147,7 +167,7 @@ class TDTestCase: tdSql.execute(cmd2) - tdLog.info("=============== step1.4,insert normal table right data and check in function") + tdLog.info("=============== step1.5,insert normal table right data and check in function") cmd1 = 'insert into normal_in_bool_tinyint_1 values(now,\'true\',\'-127\') ;' tdLog.info(cmd1) tdSql.execute(cmd1) @@ -175,6 +195,17 @@ class TDTestCase: tdSql.checkData(0,1,'True') tdSql.checkData(0,2,'0') + cmd4 = '''select * from normal_in_bool_tinyint_1 + where in_bool in (true,false) and in_tinyint in (0,127,-127) + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'True') + tdSql.checkData(0,2,'0') + tdSql.checkData(1,1,'False') + tdSql.checkData(1,2,'127') + tdSql.checkData(2,1,'True') + tdSql.checkData(2,2,'-127') tdLog.info("=============== step2,check int、smallint and bigint data type") @@ -378,10 +409,39 @@ class TDTestCase: tdSql.query('select * from in_int_smallint_bigint_3 where in_big in (-9223372036854775807) order by ts desc') tdSql.checkData(0,1,'0') tdSql.checkData(0,2,'32767') - tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(0,3,'-9223372036854775807') + + + tdLog.info("=============== step2.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_2 + where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767) + and in_big in (0,9223372036854775807,-9223372036854775807) + and tin_int in (0,2147483647,-2147483647) and tin_small in (0,32767,-32767) + and tin_big in (0,9223372036854775807,-9223372036854775807) + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'0') + tdSql.checkData(0,2,'32767') + tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(0,4,'0') + tdSql.checkData(0,5,'32767') + tdSql.checkData(0,6,'-9223372036854775807') + tdSql.checkData(1,1,'-2147483647') + tdSql.checkData(1,2,'0') + tdSql.checkData(1,3,'9223372036854775807') + tdSql.checkData(1,4,'-2147483647') + tdSql.checkData(1,5,'0') + tdSql.checkData(1,6,'9223372036854775807') + tdSql.checkData(2,1,'2147483647') + tdSql.checkData(2,2,'-32767') + tdSql.checkData(2,3,'0') + tdSql.checkData(2,4,'2147483647') + tdSql.checkData(2,5,'-32767') + tdSql.checkData(2,6,'0') - tdLog.info("=============== step2.3,drop normal table && create table") + tdLog.info("=============== step2.4,drop normal table && create table") cmd1 = 'drop table if exists normal_int_smallint_bigint_1 ;' cmd2 = 'create table normal_int_smallint_bigint_1 (ts timestamp,in_int int,in_small smallint , in_big bigint) ; ' tdLog.info(cmd1) @@ -390,7 +450,7 @@ class TDTestCase: tdSql.execute(cmd2) - tdLog.info("=============== step2.4,insert normal table right data and check in function") + tdLog.info("=============== step2.5,insert normal table right data and check in function") cmd1 = 'insert into normal_int_smallint_bigint_1 values(now,\'2147483647\',\'-32767\',\'0\') ;' tdLog.info(cmd1) tdSql.execute(cmd1) @@ -437,7 +497,23 @@ class TDTestCase: tdSql.query('select * from normal_int_smallint_bigint_1 where in_big in (-9223372036854775807) order by ts desc') tdSql.checkData(0,1,'0') tdSql.checkData(0,2,'32767') - tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(0,3,'-9223372036854775807') + + cmd4 = '''select * from normal_int_smallint_bigint_1 + where in_int in (0,2147483647,-2147483647) and in_small in (0,32767,-32767) + and in_big in (0,9223372036854775807,-9223372036854775807) + order by 
ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'0') + tdSql.checkData(0,2,'32767') + tdSql.checkData(0,3,'-9223372036854775807') + tdSql.checkData(1,1,'-2147483647') + tdSql.checkData(1,2,'0') + tdSql.checkData(1,3,'9223372036854775807') + tdSql.checkData(2,1,'2147483647') + tdSql.checkData(2,2,'-32767') + tdSql.checkData(2,3,'0') tdLog.info("=============== step3,check binary and nchar data type") @@ -560,7 +636,30 @@ class TDTestCase: tdSql.checkData(0,2,'北京涛思数据科技有限公司') - tdLog.info("=============== step3.3,drop normal table && create table") + tdLog.info("=============== step3.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_3 + where in_binary in (\'0\',\'TDengine\',\'TAOS\') + and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\') + and tin_binary in (\'0\',\'TDengine\',\'taosdataTDengine\') + and tin_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'北京涛思数据科技有限公司TDengine\') + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'TDengine') + tdSql.checkData(0,2,'北京涛思数据科技有限公司') + tdSql.checkData(0,3,'taosdataTDengine') + tdSql.checkData(0,4,'北京涛思数据科技有限公司TDengine') + tdSql.checkData(1,1,'TAOS') + tdSql.checkData(1,2,'涛思数据TAOSdata') + tdSql.checkData(1,3,'TDengine') + tdSql.checkData(1,4,'北京涛思数据科技有限公司') + tdSql.checkData(2,1,'0') + tdSql.checkData(2,2,'0') + tdSql.checkData(2,3,'0') + tdSql.checkData(2,4,'0') + + + tdLog.info("=============== step3.4,drop normal table && create table") cmd1 = 'drop table if exists normal_in_binary_nchar_1 ;' cmd2 = 'create table normal_in_binary_nchar_1 (ts timestamp,in_binary binary(8),in_nchar nchar(12)) ; ' tdLog.info(cmd1) @@ -569,7 +668,7 @@ class TDTestCase: tdSql.execute(cmd2) - tdLog.info("=============== step3.4,insert normal table right data and check in function") + tdLog.info("=============== step3.5,insert normal table right data and check in function") cmd1 = 'insert into normal_in_binary_nchar_1 values(now,\'0\',\'0\') ;' tdLog.info(cmd1) tdSql.execute(cmd1) @@ -598,124 +697,413 @@ class TDTestCase: tdSql.checkData(0,2,'北京涛思数据科技有限公司') tdSql.query('select * from normal_in_binary_nchar_1 where in_nchar in (\'北京涛思数据科技有限公司\') order by ts desc') tdSql.checkData(0,1,'TDengine') - tdSql.checkData(0,2,'北京涛思数据科技有限公司') + tdSql.checkData(0,2,'北京涛思数据科技有限公司') + + cmd4 = '''select * from normal_in_binary_nchar_1 + where in_binary in (\'0\',\'TDengine\',\'TAOS\') + and in_nchar in (\'0\',\'北京涛思数据科技有限公司\',\'涛思数据TAOSdata\') + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'TDengine') + tdSql.checkData(0,2,'北京涛思数据科技有限公司') + tdSql.checkData(1,1,'TAOS') + tdSql.checkData(1,2,'涛思数据TAOSdata') + tdSql.checkData(2,1,'0') + tdSql.checkData(2,2,'0') + - tdLog.info("=============== step4,check float and double data type,not support") + tdLog.info("=============== step4,check float and double data type") tdLog.info("=============== step4.1,drop table && create table") - cmd1 = 'drop table if exists in_float_double_1 ;' + cmd1 = 'drop table if exists in_ts_float_double_1 ;' + cmd2 = 'drop table if exists in_ts_float_double_2 ;' + cmd3 = 'drop table if exists in_ts_float_double_3 ;' cmd10 = 'drop table if exists in_stable_4 ;' - cmd11 = 'create stable in_stable_4(ts timestamp,in_float float,in_double double) tags (tin_float float,tin_double double) ;' - cmd12 = 'create table in_float_double_1 using in_stable_4 tags(\'666\',\'88888\') ; ' + cmd11 = 'create stable in_stable_4(ts timestamp,in_ts timestamp,in_float float,in_double double) tags (tin_ts 
timestamp,tin_float float,tin_double double) ;' + cmd12 = 'create table in_ts_float_double_1 using in_stable_4 tags(\'0\',\'0\',\'0\') ; ' + cmd13 = 'create table in_ts_float_double_2 using in_stable_4 tags(\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ; ' + cmd14 = 'create table in_ts_float_double_3 using in_stable_4 tags(\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ; ' tdLog.info(cmd1) tdSql.execute(cmd1) + tdLog.info(cmd2) + tdSql.execute(cmd2) + tdLog.info(cmd3) + tdSql.execute(cmd3) tdLog.info(cmd10) tdSql.execute(cmd10) tdLog.info(cmd11) tdSql.execute(cmd11) tdLog.info(cmd12) tdSql.execute(cmd12) + tdLog.info(cmd13) + tdSql.execute(cmd13) + tdLog.info(cmd14) + tdSql.execute(cmd14) tdLog.info("=============== step4.2,insert stable right data and check in function") - cmd1 = 'insert into in_float_double_1 values(now,\'888\',\'66666\') ;' + cmd1 = 'insert into in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;' tdLog.info(cmd1) - tdSql.execute(cmd1) + tdSql.execute(cmd1) + + tdSql.query('select * from in_stable_4 where in_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where in_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where in_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + + tdSql.query('select * from in_stable_4 where tin_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where tin_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + tdSql.query('select * from in_stable_4 where tin_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.checkData(0,4,'1970-01-01 08:00:00.000') + 
tdSql.checkData(0,5,0.00000) + tdSql.checkData(0,6,0.000000000) + + tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from in_ts_float_double_1 where in_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from in_ts_float_double_1 where in_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) - cmd2 = 'select * from in_stable_4 where in_float in (\'888\');' + cmd2 = 'insert into in_ts_float_double_2 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;' tdLog.info(cmd2) - tdSql.error(cmd2) - try: - tdSql.execute(cmd2) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + tdSql.execute(cmd2) + + tdSql.query('select * from in_stable_4 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where in_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where in_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where in_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + + tdSql.query('select * from in_stable_4 where tin_ts in (\'2020-01-01 08:00:00.001000\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where tin_float in (666.00000) order by ts desc') + 
tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + tdSql.query('select * from in_stable_4 where tin_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.checkData(0,4,'2020-01-01 08:00:00.001') + tdSql.checkData(0,5,666.00000) + tdSql.checkData(0,6,-88888.000000000) + + tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from in_ts_float_double_2 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from in_ts_float_double_2 where in_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from in_ts_float_double_2 where in_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) - cmd3 = 'select * from in_stable_4 where in_double in (\'66666\');' + cmd3 = 'insert into in_ts_float_double_3 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;' tdLog.info(cmd3) - tdSql.error(cmd3) - try: - tdSql.execute(cmd3) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd4 = 'select * from in_stable_4 where tin_float in (\'666\');' - tdLog.info(cmd4) - tdSql.error(cmd4) - try: - tdSql.execute(cmd4) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd5 = 'select * from in_stable_4 where tin_double in (\'88888\');' - tdLog.info(cmd5) - tdSql.error(cmd5) - try: - tdSql.execute(cmd5) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd6 = 'select * from in_float_double_1 where in_float in (\'888\');' - tdLog.info(cmd6) - tdSql.error(cmd6) - try: - tdSql.execute(cmd6) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") - - cmd7 = 'select * from in_float_double_1 where in_double in (\'66666\');' - tdLog.info(cmd7) - tdSql.error(cmd7) - try: - tdSql.execute(cmd7) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + tdSql.execute(cmd3) + + tdSql.query('select * from in_stable_4 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + 
tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where in_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where in_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where in_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'2021-01-01 08:00:00.001000\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.query('select * from in_stable_4 where tin_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + + tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from in_ts_float_double_3 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from in_ts_float_double_3 where in_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from in_ts_float_double_3 where in_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + + + tdLog.info("=============== step4.3,multiple column and multiple tag check in function") + cmd1 = '''select * from in_stable_4 + where in_ts in 
(\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') + and in_float in (0.00000,666.00000,-888.00000) + and in_double in (0.000000000,66666.000000000,-88888.000000000) + and tin_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') + and tin_float in (0.00000,666.00000,-888.00000) + and tin_double in (0.000000000,66666.000000000,-88888.000000000) + order by ts desc ;''' + tdLog.info(cmd1) + tdSql.query(cmd1) + tdSql.checkData(0,1,'2021-01-01 08:00:00.001000') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(0,4,'2021-01-01 08:00:00.001') + tdSql.checkData(0,5,-888.00000) + tdSql.checkData(0,6,66666.000000000) + tdSql.checkData(1,1,'2020-01-01 08:00:00.001000') + tdSql.checkData(1,2,666.00000) + tdSql.checkData(1,3,-88888.000000000) + tdSql.checkData(1,4,'2020-01-01 08:00:00.001') + tdSql.checkData(1,5,666.00000) + tdSql.checkData(1,6,-88888.000000000) + tdSql.checkData(2,1,'1970-01-01 08:00:00.000') + tdSql.checkData(2,2,0.00000) + tdSql.checkData(2,3,0.000000000) + tdSql.checkData(2,4,'1970-01-01 08:00:00.000') + tdSql.checkData(2,5,0.00000) + tdSql.checkData(2,6,0.000000000) + + - tdLog.info("=============== step4.3,drop normal table && create table") - cmd1 = 'drop table if exists normal_in_float_double_1 ;' - cmd2 = 'create table normal_in_float_double_1 (ts timestamp,in_float float,in_double double) ; ' + tdLog.info("=============== step4.4,drop normal table && create table") + cmd1 = 'drop table if exists normal_in_ts_float_double_1 ;' + cmd2 = 'create table normal_in_ts_float_double_1 (ts timestamp,in_ts timestamp,in_float float,in_double double) ; ' tdLog.info(cmd1) tdSql.execute(cmd1) tdLog.info(cmd2) tdSql.execute(cmd2) - tdLog.info("=============== step4.4,insert normal table right data and check in function") - cmd1 = 'insert into normal_in_float_double_1 values(now,\'888\',\'666666\') ;' + tdLog.info("=============== step4.5,insert normal table right data and check in function") + cmd1 = 'insert into normal_in_ts_float_double_1 values(now,\'0\',\'0\',\'0\') ;' tdLog.info(cmd1) - tdSql.execute(cmd1) + tdSql.execute(cmd1) - cmd2 = 'select * from normal_in_float_double_1 where in_float in (\'888\');' + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'0\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1970-01-01 08:00:00.000\') order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (0.00000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (0.000000000) order by ts desc') + tdSql.checkData(0,1,'1970-01-01 08:00:00.000') + tdSql.checkData(0,2,0.00000) + tdSql.checkData(0,3,0.000000000) + + cmd2 = 'insert into normal_in_ts_float_double_1 values(now,\'2020-01-01 08:00:00.001\',\'666\',\'-88888\') ;' tdLog.info(cmd2) - tdSql.error(cmd2) - try: - tdSql.execute(cmd2) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - 
tdLog.info("invalid operation: not supported filter condition") - - cmd3 = 'select * from normal_in_float_double_1 where in_double in (\'66666\');' + tdSql.execute(cmd2) + + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1577836800001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2020-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (666.00000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (-88888.000000000) order by ts desc') + tdSql.checkData(0,1,'2020-01-01 08:00:00.001') + tdSql.checkData(0,2,666.00000) + tdSql.checkData(0,3,-88888.000000000) + + cmd3 = 'insert into normal_in_ts_float_double_1 values(now,\'2021-01-01 08:00:00.001\',\'-888.00000\',\'66666.000000000\') ;' tdLog.info(cmd3) - tdSql.error(cmd3) - try: - tdSql.execute(cmd3) - tdLog.exit("invalid operation: not supported filter condition") - except Exception as e: - tdLog.info(repr(e)) - tdLog.info("invalid operation: not supported filter condition") + tdSql.execute(cmd3) + + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'1609459200001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_ts in (\'2021-01-01 08:00:00.001\') order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_float in (-888.00000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.query('select * from normal_in_ts_float_double_1 where in_double in (66666.000000000) order by ts desc') + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + + cmd4 = '''select * from normal_in_ts_float_double_1 + where in_ts in (\'1609459200001\',\'2021-01-01 08:00:00.001\',\'1577836800001\',\'2020-01-01 08:00:00.001000\',\'0\',\'1970-01-01 08:00:00.000\') + and in_double in (0.000000000,66666.000000000,-88888.000000000) + and in_float in (0.00000,666.00000,-888.00000) + order by ts desc ;''' + tdLog.info(cmd4) + tdSql.query(cmd4) + tdSql.checkData(0,1,'2021-01-01 08:00:00.001') + tdSql.checkData(0,2,-888.00000) + tdSql.checkData(0,3,66666.000000000) + tdSql.checkData(1,1,'2020-01-01 08:00:00.001') + tdSql.checkData(1,2,666.00000) + tdSql.checkData(1,3,-88888.000000000) + tdSql.checkData(2,1,'1970-01-01 08:00:00.000') + tdSql.checkData(2,2,0.00000) + tdSql.checkData(2,3,0.000000000) + + def stop(self): tdSql.close() diff --git a/tests/pytest/insert/insertFromCSVOurofOrder.py b/tests/pytest/insert/insertFromCSV.py similarity index 58% rename from tests/pytest/insert/insertFromCSVOurofOrder.py rename to tests/pytest/insert/insertFromCSV.py index d4de85b7e93e78ad12962c54fbd1014615dc8e3b..c5d36485699dd2b798b353b614008be7234edfd4 100644 --- 
a/tests/pytest/insert/insertFromCSVOurofOrder.py +++ b/tests/pytest/insert/insertFromCSV.py @@ -28,16 +28,15 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.ts = 1500074556514 + self.ts = 1500074556514 + self.csvfile = "/tmp/csvfile.csv" + self.rows = 100000 def writeCSV(self): - with open('test3.csv','w', encoding='utf-8', newline='') as csvFile: + with open(self.csvfile, 'w', encoding='utf-8', newline='') as csvFile: writer = csv.writer(csvFile, dialect='excel') - for i in range(1000000): - newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000) - d = datetime.datetime.fromtimestamp(newTimestamp / 1000) - dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f")) - writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) + for i in range(self.rows): + writer.writerow([self.ts + i, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) def removCSVHeader(self): data = pd.read_csv("ordered.csv") @@ -45,23 +44,25 @@ class TDTestCase: data.to_csv("ordered.csv", header = False, index = False) def run(self): + self.writeCSV() + tdSql.prepare() - tdSql.execute("create table t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)") startTime = time.time() - tdSql.execute("insert into t1 file 'outoforder.csv'") + tdSql.execute("insert into t1 file '%s'" % self.csvfile) duration = time.time() - startTime - print("Out of Order - Insert time: %d" % duration) - tdSql.query("select count(*) from t1") - rows = tdSql.getData(0, 0) + print("Insert time: %d" % duration) + tdSql.query("select * from t1") + tdSql.checkRows(self.rows) - tdSql.execute("create table t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)") - startTime = time.time() - tdSql.execute("insert into t2 file 'ordered.csv'") - duration = time.time() - startTime - print("Ordered - Insert time: %d" % duration) - tdSql.query("select count(*) from t2") - tdSql.checkData(0,0, rows) + tdSql.execute("create table stb(ts timestamp, c1 int, c2 float, c3 int, c4 int) tags(t1 int, t2 binary(20))") + tdSql.execute("insert into t2 using stb(t1) tags(1) file '%s'" % self.csvfile) + tdSql.query("select * from stb") + tdSql.checkRows(self.rows) + + tdSql.execute("insert into t3 using stb tags(1, 'test') file '%s'" % self.csvfile) + tdSql.query("select * from stb") + tdSql.checkRows(self.rows * 2) def stop(self): tdSql.close() diff --git a/tests/pytest/insert/insert_locking.py b/tests/pytest/insert/insert_locking.py new file mode 100644 index 0000000000000000000000000000000000000000..0d780a7132fbc83b99e4f5b54fe17101ff4f35f9 --- /dev/null +++ b/tests/pytest/insert/insert_locking.py @@ -0,0 +1,178 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5021 + + tdLog.info("\n\n----------step1 : drop db and create db----------\n") + tdSql.execute('''drop database if exists db ;''') + tdSql.execute('''create database db ;''') + sql = '''show databases;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("\n\n----------step2 : create stable----------\n") + tdSql.execute('''create stable + db.stable_1 (ts timestamp, payload binary(256)) + tags(t1 binary(16),t2 int);''') + sql = '''show db.stables;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("\n\n----------step3 : create table and insert----------\n") + sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" unexpected token") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("DB error: syntax error near ', ;' (unexpected token)") + + sql = '''insert into db.table1(ts , payload) using db.stable_1 (t1 , t2) tags ("table_1" , 111) ( values (now, ;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" bind columns again") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("DB error: syntax error near ', ;' (bind columns again)") + + sql = '''insert into db.table1 using db.stable_1 (t1 , t2) tags ("table_1",111) (ts , payload) ( values (now, ;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" keyword VALUES or FILE required ") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("DB error: invalid SQL: (keyword VALUES or FILE required)") + + tdSql.execute('''insert into db.table1 using db.stable_1 (t1 , t2) + tags ("table_1" , 111) values ( now , 1) ''') + sql = '''select * from db.stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,1) + tdSql.checkData(0,2,'table_1') + + tdLog.info("\n\n----------step4 : create table and insert again----------\n") + sql = '''insert into db.table2 using db.stable_1 (t1) tags ("table_2") ( values (now, ;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" unexpected token") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("DB error: syntax error near ', ;' (unexpected token)") + + tdSql.execute('''insert into db.table2 using db.stable_1 (t1) + tags ("table_2") values ( now , 2) ''') + sql = '''select * from db.stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + tdSql.checkData(1,1,2) + tdSql.checkData(1,2,'table_2') + + tdLog.info("\n\n----------step5 : create table and insert without db----------\n") + tdSql.execute('''use db''') + sql = '''insert into table3 using stable_1 (t1) tags ("table_3") ( values (now, ;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" unexpected token") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("DB error: syntax error near ', ;' (unexpected 
token)") + + tdSql.execute('''insert into table3 using stable_1 (t1 , t2) + tags ("table_3" , 333) values ( now , 3) ''') + sql = '''select * from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + tdSql.checkData(2,1,3) + tdSql.checkData(2,2,'table_3') + + tdLog.info("\n\n----------step6 : create tables in one sql ----------\n") + sql = '''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4) + table5 using stable_1 (t1) tags ("table_5") ( values (now, ;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" unexpected token") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("DB error: syntax error near ', ;' (unexpected token)") + + tdSql.execute('''insert into table4 using stable_1 (t1) tags ("table_4") values (now, 4) + table5 using stable_1 (t1) tags ("table_5") values (now, 5) ''') + sql = '''select * from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(5) + tdSql.checkData(3,1,4) + tdSql.checkData(3,2,'table_4') + tdSql.checkData(4,1,5) + tdSql.checkData(4,2,'table_5') + + + sql = '''insert into table6 using stable_1 (t1) tags ("table_6") ( values (now, + table7 using stable_1 (t1) tags ("table_7") values (now, 7);''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" invalid SQL") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid SQL") + + tdSql.execute('''insert into table6 using stable_1 (t1 , t2) tags ("table_6" , 666) values (now, 6) + table7 using stable_1 (t1) tags ("table_7") values (now, 7) ''') + sql = '''select * from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(7) + tdSql.checkData(5,1,6) + tdSql.checkData(5,2,'table_6') + tdSql.checkData(6,1,7) + tdSql.checkData(6,2,'table_7') + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..ff3a32b0f79028ce4f612c12b41171a2bd45a765 --- /dev/null +++ b/tests/pytest/insert/line_insert.py @@ -0,0 +1,91 @@ +################################################################### +# Copyright (c) 2021 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self._conn = conn + + def run(self): + print("running {}".format(__file__)) + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test precision 'us'") + tdSql.execute('use test') + + tdSql.execute('create stable ste(ts timestamp, f int) tags(t1 bigint)') + + lines = [ "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns", + "ste,t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532ns", + "stf,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns", + "st,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000ns", + "ste,t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532ns", + "ste,t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532ns", + "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns", + "stf,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000ns" + ] + + code = self._conn.insertLines(lines) + print("insertLines result {}".format(code)) + + lines2 = [ "stg,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns", + "stg,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns" + ] + + code = self._conn.insertLines([ lines2[0] ]) + print("insertLines result {}".format(code)) + + self._conn.insertLines([ lines2[1] ]) + print("insertLines result {}".format(code)) + + tdSql.query("select * from st") + tdSql.checkRows(4) + + tdSql.query("select * from ste") + tdSql.checkRows(3) + + tdSql.query("select * from stf") + tdSql.checkRows(2) + + tdSql.query("select * from stg") + tdSql.checkRows(2) + + tdSql.query("show tables") + tdSql.checkRows(8) + + tdSql.query("describe stf") + tdSql.checkData(2, 2, 14) + + self._conn.insertLines([ + "sth,t1=4i64,t2=5f64,t4=5f64,ID=\"childtable\" c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641ms", + "sth,t1=4i64,t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933654ms" + ]) + tdSql.query('select tbname, * from sth') + tdSql.checkRows(2) + + tdSql.query('select tbname, * from childtable') + tdSql.checkRows(1) + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/retentionpolicy.py b/tests/pytest/insert/retentionpolicy.py index e0446113d6d1fb197490a09ebd1ebe4b5b12e66f..607ee26a59969f1ecafd4160e4f9db58e3af568a 100644 --- a/tests/pytest/insert/retentionpolicy.py +++ b/tests/pytest/insert/retentionpolicy.py @@ -71,13 +71,10 @@ class TDTestRetetion: tdDnodes.start(1) tdLog.info(cmd) 
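+        # record when this insert happens; step6 below advances the system clock to just
+        # before and just after ttime + 72h to verify that rows outside the retention window are dropped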
+ ttime = datetime.datetime.now() tdSql.execute(cmd) self.queryRows=tdSql.query('select * from test') - if self.queryRows==4: - self.checkRows(4,cmd) - return 0 - else: - self.checkRows(5,cmd) + self.checkRows(3,cmd) tdLog.info("=============== step3") tdDnodes.stop(1) os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=48))) @@ -92,7 +89,7 @@ class TDTestRetetion: tdLog.info(cmd) tdSql.execute(cmd) self.queryRows=tdSql.query('select * from test') - self.checkRows(6,cmd) + self.checkRows(3,cmd) tdLog.info("=============== step4") tdDnodes.stop(1) tdDnodes.start(1) @@ -100,7 +97,7 @@ class TDTestRetetion: tdLog.info(cmd) tdSql.execute(cmd) self.queryRows=tdSql.query('select * from test') - self.checkRows(5,cmd) + self.checkRows(4,cmd) tdLog.info("=============== step5") tdDnodes.stop(1) @@ -109,6 +106,23 @@ class TDTestRetetion: self.queryRows=tdSql.query('select * from test where ts > now-1d') self.checkRows(2,cmd) + tdLog.info("=============== step6") + tdDnodes.stop(1) + os.system("date -s '%s'"%(ttime + datetime.timedelta(seconds=(72*60*60-7)))) + tdDnodes.start(1) + while datetime.datetime.now() < (ttime + datetime.timedelta(seconds=(72*60*60-1))): + time.sleep(0.001) + cmd = 'select * from test' + self.queryRows=tdSql.query(cmd) + self.checkRows(4,cmd) + while datetime.datetime.now() <= (ttime + datetime.timedelta(hours=72)): + time.sleep(0.001) + time.sleep(0.01) + cmd = 'select * from test' + self.queryRows=tdSql.query(cmd) + print(tdSql.queryResult) + self.checkRows(3,cmd) + def stop(self): os.system("sudo timedatectl set-ntp true") os.system("date -s '%s'"%(datetime.datetime.now()+datetime.timedelta(hours=1))) diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json new file mode 100644 index 0000000000000000000000000000000000000000..d4ef8dbe97ca144f59c0b1c961fe930bfcdbfcb2 --- /dev/null +++ b/tests/pytest/query/nestedQuery/insertData.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb0_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100000, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/query/nestedQuery/nestedQueryJson.py 
b/tests/pytest/query/nestedQuery/nestedQueryJson.py new file mode 100644 index 0000000000000000000000000000000000000000..36a231a9165a15cb46cbc0c1d37152f90e54b03e --- /dev/null +++ b/tests/pytest/query/nestedQuery/nestedQueryJson.py @@ -0,0 +1,81 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + os.system("%staosdemo -f query/nestedQuery/insertData.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + tdSql.query("select count(*) from stb01_1") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 200000) + + + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf query/nestedQuery/%s.sql" % testcaseFilename ) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/nestedQuery/queryInterval.py b/tests/pytest/query/nestedQuery/queryInterval.py new file mode 100644 index 0000000000000000000000000000000000000000..11c42c463ee7d863393f2921db24244718a49df8 --- /dev/null +++ b/tests/pytest/query/nestedQuery/queryInterval.py @@ -0,0 +1,106 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +import random + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts1 = 1593548685000 + self.ts2 = 1593548785000 + + + def run(self): + # tdSql.execute("drop database db ") + tdSql.prepare() + tdSql.execute("create table st (ts timestamp, num int, value int , t_instance int) tags (loc nchar(30))") + node = 5 + number = 10 + for n in range(node): + for m in range(number): + dt= m*300000+n*60000 # collecting'frequency is 10s + args1=(n,n,self.ts1+dt,n,100+2*m+2*n,10+m+n) + # args2=(n,self.ts2+dt,n,120+n,15+n) + tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d, %d)" % args1) + # tdSql.execute("insert into t1 using st tags('shanghai') values(%d, %d, %d, %d)" % args2) + + # interval function + tdSql.query("select avg(value) from st interval(10m)") + # print(tdSql.queryResult) + tdSql.checkRows(6) + tdSql.checkData(0, 0, "2020-07-01 04:20:00") + tdSql.checkData(1, 1, 107.4) + + # subquery with interval + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));") + tdSql.checkData(0, 0, 109.0) + + # subquery with interval and select two Column in parent query + tdSql.error("select ts,avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(10m));") + + # subquery with interval and sliding + tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(30s) limit 1;") + tdSql.checkData(0, 0, "2020-07-01 04:17:00") + tdSql.checkData(0, 1, 100) + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(30s));") + tdSql.checkData(0, 0, 111) + + # subquery with interval and offset + tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m);") + tdSql.checkData(0, 0, "2020-07-01 04:21:00") + tdSql.checkData(0, 1, 100) + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing0' interval(5m,1m) group by loc);") + tdSql.checkData(0, 0, 109) + + # subquery with interval,sliding and group by ; parent query with interval + tdSql.query("select avg(value) as avg_val from st where loc='beijing0' interval(8m) sliding(1m) group by loc limit 1 offset 52 ;") + tdSql.checkData(0, 0, "2020-07-01 05:09:00") + tdSql.checkData(0, 1, 118) + tdSql.query("select avg(avg_val) as ncst from(select avg(value) as avg_val from st where loc!='beijing0' interval(8m) sliding(1m) group by loc ) interval(5m);") + tdSql.checkData(1, 1, 105) + + # # subquery and parent query with interval and sliding + tdSql.query("select avg(avg_val) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(5m)) interval(10m) sliding(2m);") + tdSql.checkData(29, 0, "2020-07-01 05:10:00.000") + + # subquery and parent query with top and bottom + tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val desc;") + tdSql.checkData(0, 1, 117) + 
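(Reviewer note: the 109.0 asserted by the subquery-with-interval check earlier in this test is plain arithmetic over the generated data. For loc='beijing0' the loop above writes value = 100 + 2*m every five minutes, and with the chosen start timestamp each 10-minute window of the inner query covers exactly two consecutive points. A short sketch of that derivation follows; it is illustrative only and not part of the patch.)

```python
# Editor's sketch: re-deriving the expected 109.0 for
#   select avg(avg_val) from (select avg(value) as avg_val from st
#                             where loc='beijing0' interval(10m))
values = [100 + 2 * m for m in range(10)]                    # one point every 5 minutes
window_avgs = [sum(values[i:i + 2]) / 2 for i in range(0, len(values), 2)]
print(window_avgs)                                           # [101.0, 105.0, 109.0, 113.0, 117.0]
print(sum(window_avgs) / len(window_avgs))                   # 109.0, the value checked above
```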
tdSql.query("select bottom(avg_val,3) from(select avg(value) as avg_val,num from st where loc!='beijing0' group by num) order by avg_val asc;") + tdSql.checkData(0, 1, 111) + + # + tdSql.query("select top(avg_val,2) from(select avg(value) as avg_val from st where loc='beijing1' interval(8m) sliding(3m));") + tdSql.checkData(0, 1, 120) + + # clear env + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf wal/%s.sql" % testcaseFilename ) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/nestedQuery/queryWithOrderLimit.py b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py new file mode 100644 index 0000000000000000000000000000000000000000..692b5b7d364bee2164bda6707443b29c4cef4d14 --- /dev/null +++ b/tests/pytest/query/nestedQuery/queryWithOrderLimit.py @@ -0,0 +1,87 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes +import random + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1593548685000 + self.tables = 10 + self.rowsPerTable = 100 + + + def run(self): + # tdSql.execute("drop database db ") + tdSql.prepare() + tdSql.execute("create table st (ts timestamp, num int, value int) tags (loc nchar(30))") + for i in range(self.tables): + for j in range(self.rowsPerTable): + args1=(i, i, self.ts + i * self.rowsPerTable + j * 10000, i, random.randint(1, 100)) + tdSql.execute("insert into t%d using st tags('beijing%d') values(%d, %d, %d)" % args1) + + tdSql.query("select * from (select * from st)") + tdSql.checkRows(self.tables * self.rowsPerTable) + + tdSql.query("select * from (select * from st limit 10)") + tdSql.checkRows(10) + + tdSql.query("select * from (select * from st order by ts desc limit 10)") + tdSql.checkRows(10) + + # bug: https://jira.taosdata.com:18080/browse/TD-5043 + tdSql.query("select * from (select * from st order by ts desc limit 10 offset 1000)") + tdSql.checkRows(0) + + tdSql.query("select avg(value), sum(value) from st group by tbname") + tdSql.checkRows(self.tables) + + tdSql.query("select * from (select avg(value), sum(value) from st group by tbname)") + tdSql.checkRows(self.tables) + + tdSql.query("select avg(value), sum(value) from st group by tbname slimit 5") + tdSql.checkRows(5) + + tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5)") + tdSql.checkRows(5) + + tdSql.query("select avg(value), sum(value) from st group by tbname slimit 5 soffset 7") + tdSql.checkRows(3) + + tdSql.query("select * from (select avg(value), sum(value) from st group by tbname slimit 5 soffset 7)") + tdSql.checkRows(3) + + # https://jira.taosdata.com:18080/browse/TD-5497 + tdSql.execute("create table tt(ts timestamp ,i int)") + 
tdSql.execute("insert into tt values(now, 11)(now + 1s, -12)") + tdSql.query("select * from (select max(i),0-min(i) from tt)") + tdSql.checkRows(1); + tdSql.checkData(0, 0, 11); + tdSql.checkData(0, 1, 12.0); + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/nestedQuery/queryWithSpread.py b/tests/pytest/query/nestedQuery/queryWithSpread.py new file mode 100644 index 0000000000000000000000000000000000000000..4e09b1b826f66a06b7ff5789a692173e899e6aa3 --- /dev/null +++ b/tests/pytest/query/nestedQuery/queryWithSpread.py @@ -0,0 +1,46 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdSql.execute('create table stb (ts timestamp, speed int) tags(t1 int);') + + for i in range(10): + tdSql.execute(f'create table tb_{i} using stb tags({i});') + tdSql.execute(f'insert into tb_{i} values(now, 0)') + tdSql.execute(f'insert into tb_{i} values(now+10s, {i})') + + tdSql.query('select max(col3) from (select spread(speed) as col3 from stb group by tbname);') + tdSql.checkData(0,0,'9.0') + tdSql.error('select max(col3) from (select spread(col3) as col3 from stb group by tbname);') + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/nestquery_last_row.py b/tests/pytest/query/nestquery_last_row.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4ada51744f620ca589266113acf1e3d8cfef43 --- /dev/null +++ b/tests/pytest/query/nestquery_last_row.py @@ -0,0 +1,264 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-4735 + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 
10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("=======last_row(*)========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + sql = '''select * from stable_1 + where loc = 'table_0';''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select last_row(*) from + (select * from stable_1 + where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last_row(*) from + (select * from stable_1);''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + tdSql.checkData(0,2,self.num-1) + tdSql.checkData(0,3,self.num-1) + tdSql.checkData(0,4,self.num-1) + tdSql.checkData(0,5,'False') + tdSql.checkData(0,6,'binary5.9') + tdSql.checkData(0,7,'nchar5.9') + tdSql.checkData(0,8,9.00000) + tdSql.checkData(0,9,9.000000000) + tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + tdSql.checkData(0,11,'table_5') + tdSql.checkData(0,12,5) + tdSql.checkData(0,13,5) + tdSql.checkData(0,14,5) + tdSql.checkData(0,15,5) + tdSql.checkData(0,16,'True') + tdSql.checkData(0,17,'binary5') + tdSql.checkData(0,18,'nchar5') + tdSql.checkData(0,21,'1970-01-01 08:00:00.000') + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from + (select * from regular_table_1);''' + 
tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + tdSql.checkData(0,2,self.num-1) + tdSql.checkData(0,3,self.num-1) + tdSql.checkData(0,4,self.num-1) + tdSql.checkData(0,5,'False') + tdSql.checkData(0,6,'binary5.9') + tdSql.checkData(0,7,'nchar5.9') + tdSql.checkData(0,8,9.00000) + tdSql.checkData(0,9,9.000000000) + tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + + # incorrect result, not support nest > 2 + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + #tdSql.checkRows(1) + #tdSql.checkData(0,1,self.num-1) + #tdSql.checkData(0,2,self.num-1) + #tdSql.checkData(0,3,self.num-1) + #tdSql.checkData(0,4,self.num-1) + #tdSql.checkData(0,5,'False') + #tdSql.checkData(0,6,'binary.9') + #tdSql.checkData(0,7,'nchar.9') + #tdSql.checkData(0,8,9.00000) + #tdSql.checkData(0,9,9.000000000) + #tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + + # bug 5055 + # sql = '''select last_row(*) from + # ((select * from stable_1) union all + # (select * from table_1) union all + # (select * from regular_table_1));''' + # tdSql.query(sql) + # tdSql.checkData(0,1,self.num-1) + + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + #tdSql.checkRows(1) + #tdSql.checkData(0,1,self.num-1) + #tdSql.checkData(0,2,self.num-1) + #tdSql.checkData(0,3,self.num-1) + #tdSql.checkData(0,4,self.num-1) + #tdSql.checkData(0,5,'False') + #tdSql.checkData(0,6,'binary.9') + #tdSql.checkData(0,7,'nchar.9') + #tdSql.checkData(0,8,9.00000) + #tdSql.checkData(0,9,9.000000000) + #tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + + # bug 5055 + # sql = '''select last_row(*) from + # ((select last_row(*) from stable_1) union all + # (select last_row(*) from table_1) union all + # (select last_row(*) from regular_table_1));''' + # tdSql.query(sql) + # tdSql.checkData(0,1,self.num-1) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + #tdSql.checkRows(1) + #tdSql.checkData(0,1,self.num-1) + #tdSql.checkData(0,2,self.num-1) + #tdSql.checkData(0,3,self.num-1) + #tdSql.checkData(0,4,self.num-1) + #tdSql.checkData(0,5,'False') + #tdSql.checkData(0,6,'binary.9') + #tdSql.checkData(0,7,'nchar.9') + #tdSql.checkData(0,8,9.00000) + #tdSql.checkData(0,9,9.000000000) + #tdSql.checkData(0,10,'2020-09-13 20:26:40.009') + + + sql = '''select last_row(*) from + (select * from stable_1) + having q_int>5;''' + tdLog.info(sql) + tdSql.error(sql) + try: + tdSql.execute(sql) + tdLog.exit(" having only works with group by") + except Exception as e: + tdLog.info(repr(e)) + tdLog.info("invalid operation: having only works with group by") + + #bug 5057 + # sql = '''select last_row(*) from + # (select * from (select * from stable_1))''' + # tdLog.info(sql) + # tdSql.error(sql) + # try: + # tdSql.execute(sql) + # tdLog.exit(" core dumped") + # except Exception as e: + # tdLog.info(repr(e)) + # tdLog.info("core dumped") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py new file mode 100644 index 
0000000000000000000000000000000000000000..774a1e5f42403a6b5f67678e53be5e07beaccde2 --- /dev/null +++ b/tests/pytest/query/operator.py @@ -0,0 +1,536 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1600000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5074 + + startTime = time.time() + + tdSql.execute('''create stable stable_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create stable stable_2 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) + tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, + t_bool bool , t_binary binary(20) , t_nchar nchar(20) , + t_float float , t_double double , t_ts timestamp);''') + tdSql.execute('''create table table_0 using stable_1 + tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') + tdSql.execute('''create table table_1 using stable_1 + tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , + 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_2 using stable_1 + tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , + 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') + tdSql.execute('''create table table_3 using stable_1 + tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') + tdSql.execute('''create table table_4 using stable_1 + tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') + tdSql.execute('''create table table_5 using stable_1 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + tdSql.execute('''create table table_21 using stable_2 + tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') + #regular table + tdSql.execute('''create table regular_table_1 + (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, + q_bool bool , q_binary binary(20) , q_nchar nchar(20) , + q_float float , q_double double , q_ts timestamp) ;''') + + for i in 
range(self.num): + tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' + % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' + % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, + i, i, random.random(), random.random(), 1262304000001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' + % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, + i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' + % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), + random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), + random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), + random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' + % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) + tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' + % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) + + tdLog.info("========== operator=1(OP_TableScan) ==========") + tdLog.info("========== operator=7(OP_Project) ==========") + sql = '''select * from stable_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select * from regular_table_1''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + tdLog.info("========== operator=14(OP_MultiTableAggregate ) 
==========") + sql = '''select last_row(*) from stable_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=6(OP_Aggregate) ==========") + sql = '''select last_row(*) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkData(0,1,self.num-1) + + tdLog.info("========== operator=9(OP_Limit) ==========") + sql = '''select * from stable_1 where loc = 'table_0' limit 5;''' + tdSql.query(sql) + tdSql.checkRows(5) + sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' + tdSql.query(sql) + tdSql.checkRows(1) + + sql = '''select * from regular_table_1 ;''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + sql = '''select last_row(*) from (select * from regular_table_1);''' + tdSql.query(sql) + tdSql.checkRows(1) + tdSql.checkData(0,1,self.num-1) + + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=16(OP_DummyInput) ==========") + sql = '''select last_row(*) from + ((select last_row(*) from table_0) union all + (select last_row(*) from table_1) union all + (select last_row(*) from table_2));''' + tdSql.error(sql) + + sql = '''select last_row(*) from + ((select * from table_0 limit 5 offset 5) union all + (select * from table_1 limit 5 offset 5) union all + (select * from regular_table_1 limit 5 offset 5));''' + tdSql.error(sql) + + tdLog.info("========== operator=10(OP_SLimit) ==========") + sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' + tdSql.query(sql) + tdSql.checkRows(3) + + sql = '''select last_row(*) from + ((select * from table_0) union all + (select * from table_1) union all + (select * from table_2));''' + tdSql.error(sql) + + tdLog.info("========== operator=20(OP_Distinct) ==========") + tdLog.info("========== operator=4(OP_TagScan) ==========") + sql = '''select distinct(t_bool) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(2) + sql = '''select distinct(loc) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + sql = '''select distinct(t_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(3) + sql = '''select distinct(tbname) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(6) + + tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") + sql = '''select last(q_int),first(q_int) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_bool),first(q_bool) from stable_1;''' + tdSql.query(sql) + 
tdSql.checkRows(1) + sql = '''select last(q_binary),first(q_binary) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_float),first(q_float) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_double),first(q_double) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_ts),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), + first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), + last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), + first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=8(OP_Groupby) ==========") + sql = '''select stddev(q_int) from table_0 group by q_int;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' + tdSql.query(sql) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' + tdSql.query(sql) + + tdLog.info("========== operator=11(OP_TimeWindow) ==========") + sql = '''select last(q_int) from table_0 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), + first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=12(OP_SessionWindow) ==========") + sql = '''select count(*) from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*) from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), 
last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 session(ts,1s);''' + tdSql.query(sql) + tdSql.checkRows(1) + + tdLog.info("========== operator=13(OP_Fill) ==========") + sql = '''select sum(q_int) from table_0 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + #TD-5190 + sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 + where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' + tdSql.query(sql) + tdSql.checkData(0,1,'None') + + 
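(Reviewer note on the fill(NULL) checks above: the data generated by this test starts at self.ts = 1600000000000, i.e. September 2020, while the queries ask for monthly windows from 1970-10-01 onward, so the earliest buckets contain no rows and every filled aggregate comes back as NULL, which the Python layer surfaces as None; column 0 is the window timestamp, hence checkData(0, 1, 'None'). A tiny illustrative sketch, not part of the patch:)

```python
# Editor's sketch: the first monthly fill(NULL) bucket predates the data, so its
# aggregates are NULL (None in Python).
import datetime

first_row = datetime.datetime.utcfromtimestamp(1600000000)   # self.ts / 1000 -> 2020-09-13
first_window = datetime.datetime(1970, 10, 1)
print(first_window < first_row)                              # True: early windows hold no rows
```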
tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") + sql = '''select avg(q_int) from stable_1 where ts=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having sum(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having avg(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having min(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having max(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by 
loc having first(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from stable_1 group by loc having last(q_int)>=0;''' + tdSql.query(sql) + tdSql.checkData(0,0,'table_0') + + tdLog.info("========== operator=21(OP_Join) ==========") + sql = '''select t1.q_int,t2.q_int from + (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from table_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from table_0) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from stable_1) t1 , (select * from table_2) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2 + where t2.ts = t1.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select t1.*,t2.*,t3.* from + (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 + where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' + tdSql.query(sql) + tdSql.checkRows(self.num) + + tdLog.info("========== operator=22(OP_StateWindow) ==========") + sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from table_1 state_window(q_bigint);''' + tdSql.query(sql) + tdSql.checkRows(self.num) + sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), + sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), + sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), + sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), + sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), + sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) + from regular_table_1 state_window(q_smallint);''' + tdSql.query(sql) + tdSql.checkRows(6*self.num) + + endTime = time.time() + print("total time %ds" % (endTime - 
startTime)) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py index 93404afd5967e772b383b8e2439ec4e89f5a7029..a365369b21fc7429216f5c1e8c624bf856a744c1 100644 --- a/tests/pytest/query/query1970YearsAf.py +++ b/tests/pytest/query/query1970YearsAf.py @@ -187,19 +187,19 @@ class TDTestCase: "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' " ) tdSql.checkRows(719) - + tdSql.query( "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' " ) tdSql.checkRows(680) - + tdSql.query( - "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " + "select diff(c1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " ) tdSql.checkRows(679) tdSql.query( - "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" + "select t0,c1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" ) tdSql.checkRows(43200) diff --git a/tests/pytest/query/queryCnameDisplay.py b/tests/pytest/query/queryCnameDisplay.py new file mode 100644 index 0000000000000000000000000000000000000000..8864c0e37621c72ad39fb4249749244b1fbe8367 --- /dev/null +++ b/tests/pytest/query/queryCnameDisplay.py @@ -0,0 +1,104 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import string +import random +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getLongName(self, len, mode = "mixed"): + """ + generate long str + """ + chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len)) + return chars + + def checkRegularTableCname(self): + """ + check regular table cname + """ + # len(colName) <=64, generate cname list and make first param = 63 and second param = 65 + cname_list = [] + for i in range(10): + cname_list.append(self.getLongName(64)) + cname_list[0] = self.getLongName(63) + cname_list[1] = self.getLongName(65) + # create table and insert data + tdSql.execute("CREATE TABLE regular_table_cname_check (ts timestamp, pi1 int, pi2 bigint, pf1 float, pf2 double, ps1 binary(10), pi3 smallint, pi4 tinyint, pb1 bool, ps2 nchar(20))") + tdSql.execute('insert into regular_table_cname_check values (now, 1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') + tdSql.execute('insert into regular_table_cname_check values (now, 2, 3, 1.2, 2.3, "b", 2, 1, false, "aa");') + tdSql.execute('insert into regular_table_cname_check values (now, 3, 4, 1.3, 2.4, "c", 1, 3, true, "bb");') + + # select as cname with cname_list + sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as 
{cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]} from regular_table_cname_check' + sql_seq_no_as = sql_seq.replace('as ', '') + res = tdSql.getColNameList(sql_seq) + res_no_as = tdSql.getColNameList(sql_seq_no_as) + + # cname[1] > 64, it is expected to be equal to 64 + cname_list_1_expected = cname_list[1][:-1] + cname_list[1] = cname_list_1_expected + checkColNameList = tdSql.checkColNameList(res, cname_list) + checkColNameList = tdSql.checkColNameList(res_no_as, cname_list) + + def checkSuperTableCname(self): + """ + check super table cname + """ + # len(colName) <=64, generate cname list and make first param = 63 and second param = 65 + cname_list = [] + for i in range(19): + cname_list.append(self.getLongName(64)) + cname_list[0] = self.getLongName(63) + cname_list[1] = self.getLongName(65) + + # create table and insert data + tdSql.execute("create table super_table_cname_check (ts timestamp, pi1 int, pi2 bigint, pf1 float, pf2 double, ps1 binary(10), pi3 smallint, pi4 tinyint, pb1 bool, ps2 nchar(20)) tags (si1 int, si2 bigint, sf1 float, sf2 double, ss1 binary(10), si3 smallint, si4 tinyint, sb1 bool, ss2 nchar(20));") + tdSql.execute('create table st1 using super_table_cname_check tags (1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') + tdSql.execute('insert into st1 values (now, 1, 2, 1.1, 2.2, "a", 1, 1, true, "aa");') + tdSql.execute('insert into st1 values (now, 1, 1, 1.4, 2.3, "b", 3, 2, true, "aa");') + tdSql.execute('insert into st1 values (now, 1, 2, 1.1, 2.2, "a", 1, 1, false, "bb");') + + # select as cname with cname_list + sql_seq = f'select count(ts) as {cname_list[0]}, sum(pi1) as {cname_list[1]}, avg(pi2) as {cname_list[2]}, count(pf1) as {cname_list[3]}, count(pf2) as {cname_list[4]}, count(ps1) as {cname_list[5]}, min(pi3) as {cname_list[6]}, max(pi4) as {cname_list[7]}, count(pb1) as {cname_list[8]}, count(ps2) as {cname_list[9]}, count(si1) as {cname_list[10]}, count(si2) as {cname_list[11]}, count(sf1) as {cname_list[12]}, count(sf2) as {cname_list[13]}, count(ss1) as {cname_list[14]}, count(si3) as {cname_list[15]}, count(si4) as {cname_list[16]}, count(sb1) as {cname_list[17]}, count(ss2) as {cname_list[18]} from super_table_cname_check' + sql_seq_no_as = sql_seq.replace('as ', '') + res = tdSql.getColNameList(sql_seq) + res_no_as = tdSql.getColNameList(sql_seq_no_as) + + # cname[1] > 64, it is expected to be equal to 64 + cname_list_1_expected = cname_list[1][:-1] + cname_list[1] = cname_list_1_expected + checkColNameList = tdSql.checkColNameList(res, cname_list) + checkColNameList = tdSql.checkColNameList(res_no_as, cname_list) + + def run(self): + tdSql.prepare() + self.checkRegularTableCname() + self.checkSuperTableCname() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index 539ce5141fae9cfba7ba63b569179c0170ef428d..ac78c0518f36a847652499aaa5722d4585e42d44 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -51,7 +51,7 @@ class TDTestCase: tdSql.error("select last_row as latest from st") # query distinct on normal colnum - tdSql.error("select distinct tagtype from st") + #tdSql.error("select distinct tagtype from st") # query .. 
order by non-time field tdSql.error("select * from st order by name") diff --git a/tests/pytest/query/queryInterval.py b/tests/pytest/query/queryInterval.py index ce8d05ae50aa61646b20e07e9484d00559e92b49..d61e8cf288c97fc869f19cba6bd3d181dc60797c 100644 --- a/tests/pytest/query/queryInterval.py +++ b/tests/pytest/query/queryInterval.py @@ -114,8 +114,7 @@ class TDTestCase: tdSql.query("select first(ts),twa(c) from tb interval(14a)") tdSql.checkRows(6) - tdSql.query("select twa(c) from tb group by c") - tdSql.checkRows(4) + tdSql.error("select twa(c) from tb group by c") def stop(self): diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py index 720ae745cb9b3780f1ca7ffaf96d76eda5f307b1..2aa760624fcfdb511b92d511326df8938dc45c2d 100644 --- a/tests/pytest/query/queryPerformance.py +++ b/tests/pytest/query/queryPerformance.py @@ -45,27 +45,38 @@ class taosdemoQueryPerformace: sql = "select count(*) from test.meters" tableid = 1 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from test.meters" tableid = 2 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select count(*) from test.meters where loc='beijing'" tableid = 3 cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from test.meters where areaid=10" tableid = 4 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from test.t10 interval(10s)" tableid = 5 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select last_row(*) from meters" tableid = 6 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select * from meters" tableid = 7 cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'" tableid = 8 cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + + sql = "select last(*) from meters" + tableid = 9 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) cursor.close() diff --git a/tests/pytest/query/querySession.py b/tests/pytest/query/querySession.py index 620f755bcbdecf2b76a1dfd96282e646f286151c..216ff68b71e0286b1783056b7cbc1165a572f38b 100644 --- a/tests/pytest/query/querySession.py +++ b/tests/pytest/query/querySession.py @@ -51,38 +51,73 @@ class TDTestCase: tdSql.checkRows(15) tdSql.checkData(0, 1, 2) + # session(ts,5a) main query + tdSql.query("select count(*) from (select * from dev_001) session(ts,5a)") + tdSql.checkRows(15) + tdSql.checkData(0, 1, 2) + # session(ts,1s) tdSql.query("select count(*) from dev_001 session(ts,1s)") tdSql.checkRows(12) tdSql.checkData(0, 1, 5) + # session(ts,1s) main query + tdSql.query("select count(*) from (select * from dev_001) session(ts,1s)") + tdSql.checkRows(12) + tdSql.checkData(0, 1, 5) + 
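(Reviewer note: every new assertion in this querySession.py hunk follows one pattern, a session() window computed over the subquery "(select * from dev_001)" must bucket rows exactly like the same session() window over the table itself. A compact sketch of that pattern is shown below; it assumes the repo's util.sql test helper and simply restates the 1s and 1m cases asserted above.)

```python
# Editor's sketch (assumes the util.sql tdSql helper used throughout these tests):
# session() over a subquery must match session() over the base table.
from util.sql import tdSql

def check_session_equivalence(width, expected_rows, expected_first_count):
    for source in ("dev_001", "(select * from dev_001)"):
        tdSql.query("select count(*) from %s session(ts,%s)" % (source, width))
        tdSql.checkRows(expected_rows)
        tdSql.checkData(0, 1, expected_first_count)   # column 1 holds the per-window count

check_session_equivalence("1s", 12, 5)
check_session_equivalence("1m", 9, 8)
```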
tdSql.query("select count(*) from dev_001 session(ts,1000a)") tdSql.checkRows(12) tdSql.checkData(0, 1, 5) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1000a)") + tdSql.checkRows(12) + tdSql.checkData(0, 1, 5) + # session(ts,1m) tdSql.query("select count(*) from dev_001 session(ts,1m)") tdSql.checkRows(9) tdSql.checkData(0, 1, 8) + # session(ts,1m) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1m)") + tdSql.checkRows(9) + tdSql.checkData(0, 1, 8) + # session(ts,1h) tdSql.query("select count(*) from dev_001 session(ts,1h)") tdSql.checkRows(6) tdSql.checkData(0, 1, 11) + # session(ts,1h) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1h)") + tdSql.checkRows(6) + tdSql.checkData(0, 1, 11) + # session(ts,1d) tdSql.query("select count(*) from dev_001 session(ts,1d)") tdSql.checkRows(4) tdSql.checkData(0, 1, 13) + # session(ts,1d) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1d)") + tdSql.checkRows(4) + tdSql.checkData(0, 1, 13) + # session(ts,1w) tdSql.query("select count(*) from dev_001 session(ts,1w)") tdSql.checkRows(2) tdSql.checkData(0, 1, 15) + # session(ts,1w) + tdSql.query("select count(*) from (select * from dev_001) session(ts,1w)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 15) + # session with where tdSql.query("select count(*),first(tagtype),last(tagtype),avg(tagtype),sum(tagtype),min(tagtype),max(tagtype),leastsquares(tagtype, 1, 1),spread(tagtype),stddev(tagtype),percentile(tagtype,0) from dev_001 where ts <'2020-05-20 0:0:0' session(ts,1d)") + tdSql.checkRows(2) tdSql.checkData(0, 1, 13) tdSql.checkData(0, 2, 1) @@ -97,6 +132,20 @@ class TDTestCase: tdSql.checkData(0, 11, 1) tdSql.checkData(1, 11, 14) + # session with where main + + tdSql.query("select count(*),first(tagtype),last(tagtype),avg(tagtype),sum(tagtype),min(tagtype),max(tagtype),leastsquares(tagtype, 1, 1) from (select * from dev_001 where ts <'2020-05-20 0:0:0') session(ts,1d)") + + tdSql.checkRows(2) + tdSql.checkData(0, 1, 13) + tdSql.checkData(0, 2, 1) + tdSql.checkData(0, 3, 13) + tdSql.checkData(0, 4, 7) + tdSql.checkData(0, 5, 91) + tdSql.checkData(0, 6, 1) + tdSql.checkData(0, 7, 13) + tdSql.checkData(0, 8, '{slop:1.000000, intercept:0.000000}') + # tdsql err tdSql.error("select * from dev_001 session(ts,1w)") tdSql.error("select count(*) from st session(ts,1w)") diff --git a/tests/pytest/query/queryStateWindow.py b/tests/pytest/query/queryStateWindow.py new file mode 100644 index 0000000000000000000000000000000000000000..251dbef65841cc17b31046320a7e966426c5eeb1 --- /dev/null +++ b/tests/pytest/query/queryStateWindow.py @@ -0,0 +1,111 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.rowNum = 100000 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute( + "create table if not exists st (ts timestamp, t1 int, t2 timestamp, t3 bigint, t4 float, t5 double, t6 binary(10), t7 smallint, t8 tinyint, t9 bool, t10 nchar(10), t11 int unsigned, t12 bigint unsigned, t13 smallint unsigned, t14 tinyint unsigned ,t15 int) tags(dev nchar(50), tag2 binary(16))") + tdSql.execute( + 'CREATE TABLE if not exists dev_001 using st tags("dev_01", "tag_01")') + tdSql.execute( + 'CREATE TABLE if not exists dev_002 using st tags("dev_02", "tag_02")') + + print("==============step2") + + tdSql.execute( + "INSERT INTO dev_001 VALUES('2020-05-13 10:00:00.000', 1, '2020-05-13 10:00:00.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 254, 1)('2020-05-13 10:00:01.000', 1, '2020-05-13 10:00:01.000', 10, 3.1, 3.14, 'test', -10, -126, true, '测试', 15, 10, 65534, 253, 5)('2020-05-13 10:00:02.000', 10, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', 10, -127, false, '测试', 15, 10, 65534, 253, 10)('2020-05-13 10:00:03.000', 1, '2020-05-13 10:00:00.000', 11, 3.1, 3.14, 'test', -10, -126, true, '测试', 14, 12, 65532, 254, 15)") + + for i in range(self.rowNum): + tdSql.execute("insert into dev_002 (ts,t1) values(%d, %d,)" % (self.ts + i, i + 1)) + + tdSql.query("select count(ts) from dev_001 state_window(t1)") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 2) + tdSql.query("select count(ts) from dev_001 state_window(t3)") + tdSql.checkRows(2) + tdSql.checkData(1, 0, 2) + tdSql.query("select count(ts) from dev_001 state_window(t7)") + tdSql.checkRows(3) + tdSql.checkData(1, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t8)") + tdSql.checkRows(3) + tdSql.checkData(2, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t11)") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 3) + tdSql.query("select count(ts) from dev_001 state_window(t12)") + tdSql.checkRows(2) + tdSql.checkData(1, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t13)") + tdSql.checkRows(2) + tdSql.checkData(1, 0, 1) + tdSql.query("select count(ts) from dev_001 state_window(t14)") + tdSql.checkRows(3) + tdSql.checkData(1, 0, 2) + tdSql.query("select count(ts) from dev_002 state_window(t1)") + tdSql.checkRows(100000) + + # with all aggregate function + tdSql.query("select count(*),sum(t1),avg(t1),twa(t1),stddev(t15),leastsquares(t15,1,1),first(t15),last(t15),spread(t15),percentile(t15,90),t9 from dev_001 state_window(t9);") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 2) + tdSql.checkData(1, 1, 10) + tdSql.checkData(0, 2, 1) + # tdSql.checkData(0, 3, 1) + tdSql.checkData(0, 4, np.std([1,5])) + # tdSql.checkData(0, 5, 1) + tdSql.checkData(0, 6, 1) + tdSql.checkData(0, 7, 5) + tdSql.checkData(0, 8, 4) + tdSql.checkData(0, 9, 4.6) + tdSql.checkData(0, 10, 'True') + + # with where + tdSql.query("select avg(t15),t9 from dev_001 where t9='true' state_window(t9);") + tdSql.checkData(0, 0, 7) + 
tdSql.checkData(0, 1, 'True') + + # error + tdSql.error("select count(*) from dev_001 state_window(t2)") + tdSql.error("select count(*) from st state_window(t3)") + tdSql.error("select count(*) from dev_001 state_window(t4)") + tdSql.error("select count(*) from dev_001 state_window(t5)") + tdSql.error("select count(*) from dev_001 state_window(t6)") + tdSql.error("select count(*) from dev_001 state_window(t10)") + tdSql.error("select count(*) from dev_001 state_window(tag2)") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/max_table_length.py b/tests/pytest/table/max_table_length.py index ec34f3008fccdb200e645183a5cfdf577fc27503..366e20456ddbf71eabe7ff64c8d0597c3d5b5f6e 100644 --- a/tests/pytest/table/max_table_length.py +++ b/tests/pytest/table/max_table_length.py @@ -42,7 +42,7 @@ class TDTestCase: print("==============step3") tdLog.info("check int & binary") - tdSql.error("create table anal2 (ts timestamp ,i binary(16371),j int)") + # tdSql.error("create table anal2 (ts timestamp ,i binary(16371),j int)") tdSql.execute("create table anal2 (ts timestamp ,i binary(16370),j int)") tdSql.execute("create table anal3 (ts timestamp ,i binary(16366), j int, k int)") diff --git a/tests/pytest/tag_lite/timestamp.py b/tests/pytest/tag_lite/timestamp.py new file mode 100644 index 0000000000000000000000000000000000000000..fa71c2e15422e80076f2b6a6eb26e8198a69a569 --- /dev/null +++ b/tests/pytest/tag_lite/timestamp.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdLog.info('======================== dnode1 start') + tbPrefix = "ta_fl_tb" + mtPrefix = "ta_fl_mt" + tbNum = 10 + rowNum = 20 + totalNum = 200 + tdLog.info('=============== step1') + i = 0 + mt = "%s%d" % (mtPrefix, i) + tdSql.execute( + 'create table %s (ts timestamp, tbcol int) TAGS(tgcol float, tgTs timestamp, tgcol2 int)' %(mt)) + i = 0 + ts = 1605045600000 + tsStr = "2020-11-11 06:00:00" + while (i < 5): + tb = "%s%d" % (tbPrefix, i) + tdLog.info('create table %s using %s tags(%d, %d, %d)' % (tb, mt, i, ts + i, i)) + tdSql.execute('create table %s using %s tags(%d, %d, %d)' % (tb, mt, i, ts + i, i)) + x = 0 + while (x < rowNum): + ms = x * 60000 + #tdLog.info( + # "insert into %s values (%d, %d)" % + # (tb, 1605045600000 + ms, x)) + tdSql.execute( + "insert into %s values (%d, %d)" % + (tb, 1605045600000 + ms, x)) + x = x + 1 + i = i + 1 + tdLog.info('=============== step2') + tdSql.query('select * from %s' % (mt)) + tdSql.checkRows(5 * rowNum) + + tdSql.query('select * from %s where tgTs = %ld and tgcol2 = 0' % (mt, ts)) + tdSql.checkRows(rowNum) + + tdSql.query('select * from %s where tgTs = \"%s\" and tgcol2 = 0' % (mt, tsStr)) + tdSql.checkRows(rowNum) + + tdLog.info('=============== step3') + i = 0 + while (i < 5): + tb = "%s%d" % (tbPrefix, i + 100) + tdLog.info('create table %s using %s tags(%d, \"%s\", %d)' % (tb, mt, i + 100, tsStr, i + 100)) + tdSql.execute('create table %s using %s tags(%d, \"%s\", %d)' % (tb, mt, i + 100, tsStr, i + 100)) + x = 0 + while (x < rowNum): + ms = x * 60000 + #tdLog.info( + # "insert into %s values (%d, %d)" % + # (tb, 1605045600000 + ms, x)) + tdSql.execute( + "insert into 
%s values (%d, %d)" % + (tb, 1605045600000 + ms, x)) + x = x + 1 + i = i + 1 + + tdSql.query('select * from %s where tgTs = %ld and tgcol2 = 100' % (mt, ts)) + tdSql.checkRows(rowNum) + + tdSql.query('select * from %s where tgTs = \"%s\" and tgcol2 = 100' % (mt, tsStr)) + tdSql.checkRows(rowNum) + + tdLog.info('=============== step4') + + i = 0 + tb = "%s%d"%(tbPrefix, i + 1000) + tdSql.execute('insert into %s using %s tags(%d, \"%s\", %d) values(now, 10)' % (tb, mt, i + 100, tsStr, i + 1000)) + tdSql.execute('insert into %s using %s tags(%d, \"%s\", %d) values(now+2s, 10)' % (tb, mt, i + 100, tsStr, i + 1000)) + tdSql.execute('insert into %s using %s tags(%d, \"%s\", %d) values(now+3s, 10)' % (tb, mt, i + 100, tsStr, i + 1000)) + tdSql.query('select * from %s where tgTs = \"%s\" and tgcol2 = 1000' % (mt, tsStr)) + tdSql.checkRows(3) + + i = 0 + tb = "%s%d"%(tbPrefix, i + 10000) + tdSql.execute('create table %s using %s tags(%d, now, %d)' % (tb, mt, i + 10000,i + 10000)) + tdSql.checkRows(3) + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/sub.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json similarity index 58% rename from tests/pytest/tools/taosdemoAllTest/sub.json rename to tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json index fe3c892a76bcc30678f60127d28ce79bf8682c18..93462d2c66cea62c21f0cc196652c94439f47bc0 100644 --- a/tests/pytest/tools/taosdemoAllTest/sub.json +++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfter1.json @@ -9,29 +9,33 @@ "confirm_parameter_prompt": "no", "specified_table_query": { - "concurrent":1, + "concurrent":2, "mode":"sync", "interval":0, - "restart":"yes", + "resubAfterConsume":1, + "endAfterConsume":1, "keepProgress":"yes", + "restart":"no", "sqls": [ { - "sql": "select * from stb00_0 ;", + "sql": "select * from stb00_0", "result": "./subscribe_res0.txt" }] }, "super_table_query": { "stblname": "stb0", - "threads":1, + "threads":2, "mode":"sync", - "interval":10000, - "restart":"yes", + "interval":1000, + "resubAfterConsume":1, + "endAfterConsume":1, "keepProgress":"yes", + "restart":"no", "sqls": [ { - "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", - "result": "./subscribe_res1.txt" + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" }] } } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json new file mode 100644 index 0000000000000000000000000000000000000000..4229f304e44fcda58a0e16b1e6445ebd339215d3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubAC1endAfterMin1.json @@ -0,0 +1,41 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":0, + "resubAfterConsume":1, + "endAfterConsume":-1, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res0.txt" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":1000, + 
"resubAfterConsume":1, + "endAfterConsume":-1, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json new file mode 100644 index 0000000000000000000000000000000000000000..ac221905655e53e9053336be8fcaaa8b1070639c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json @@ -0,0 +1,41 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":0, + "resubAfterConsume":-1, + "endAfterConsume":-1, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res0.txt" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":1000, + "resubAfterConsume":-1, + "endAfterConsume":-1, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json new file mode 100644 index 0000000000000000000000000000000000000000..7d937212c94bd002307695c7059d67ad0a4e68d3 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json @@ -0,0 +1,41 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":0, + "resubAfterConsume":-1, + "endAfterConsume":0, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res0.txt" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":0, + "resubAfterConsume":-1, + "endAfterConsume":0, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json new file mode 100644 index 0000000000000000000000000000000000000000..bf8927a58badfa606103d4b11d09f871ed64260f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfterNo0.json @@ -0,0 +1,41 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":0, + "resubAfterConsume":-1, + "endAfterConsume":1, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res0.txt" + }] + }, + "super_table_query": + { 
+ "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":1000, + "resubAfterConsume":-1, + "endAfterConsume":2, + "keepProgress":"no", + "restart":"yes", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py index 1401716da9095b44aa47e9ecb2e7131bc0a8b9ea..fe29409f296b310012773b9d78ca8735cfd52a13 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.py @@ -78,7 +78,7 @@ class TDTestCase: tdSql.checkData(0, 0, "%d" % suc_kill) os.system("rm -rf querySystemInfo*") os.system("rm -rf insert_res.txt") - os.system("rm -rf insert_res.txt") + os.system("rm -rf query_res.txt") def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json index a92906fa730833108ad758d3fc53c954279abe38..62b6e7472aa779888a45603b06cf54a528923dec 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/queryall.json @@ -13,7 +13,7 @@ "sqls":[ { "sql": "select * from stb0", - "result": "" + "result": "./query_res.txt" } ] } diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.csv b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.csv new file mode 100644 index 0000000000000000000000000000000000000000..d4138798e350bb1d0aff25819c01f87604b763bc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.csv @@ -0,0 +1,10 @@ +0,0,'TAOSdata-0' +1,1,'TAOSdata-1' +2,22,'TAOSdata-2' +3,333,'TAOSdata-3' +4,4444,'TAOSdata-4' +5,55555,'TAOSdata-5' +6,666666,'TAOSdata-6' +7,7777777,'TAOSdata-7' +8,88888888,'TAOSdata-8' +9,999999999,'TAOSdata-9' \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json new file mode 100644 index 0000000000000000000000000000000000000000..265f42036bc5a4e13dc0766b66fccf32924d7185 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10000, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": 
"2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv", + "tags_file": "./tools/taosdemoAllTest/TD-4985/query-limit-offset.csv", + "columns": [{"type": "INT","count":2}, {"type": "BINARY", "len": 16, "count":1}], + "tags": [{"type": "INT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py new file mode 100644 index 0000000000000000000000000000000000000000..4edef88cf182eee88e42615fb007bbe4756f0c7c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.py @@ -0,0 +1,191 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # test case for https://jira.taosdata.com:18080/browse/TD-4985 + os.system("%staosdemo -f tools/taosdemoAllTest/TD-4985/query-limit-offset.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10000) + + for i in range(1000): + tdSql.execute('''insert into stb00_9999 values(%d, %d, %d,'test99.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_8888 values(%d, %d, %d,'test98.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_7777 values(%d, %d, %d,'test97.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_6666 values(%d, %d, %d,'test96.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_5555 values(%d, %d, %d,'test95.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_4444 values(%d, %d, %d,'test94.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_3333 values(%d, %d, %d,'test93.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_2222 values(%d, %d, %d,'test92.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_1111 values(%d, %d, 
%d,'test91.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.execute('''insert into stb00_100 values(%d, %d, %d,'test90.%s')''' + % (1600000000000 + i, i, -10000+i, i)) + tdSql.query("select * from stb0 where c2 like 'test99%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_9999' limit 10 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test98%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_8888' limit 10 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test97%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_7777' limit 10 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test96%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_6666' limit 10 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test95%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_5555' limit 10 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test94%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_4444' limit 10 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test93%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_3333' limit 100 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test92%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_2222' limit 100 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test91%' ") + tdSql.checkRows(1000) + 
tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_1111' limit 100 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + tdSql.query("select * from stb0 where c2 like 'test90%' ") + tdSql.checkRows(1000) + tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100" ) + tdSql.checkData(0, 1, 0) + tdSql.checkData(1, 1, 1) + tdSql.checkData(2, 1, 2) + tdSql.query("select * from stb0 where tbname like 'stb00_100' limit 100 offset 5" ) + tdSql.checkData(0, 1, 5) + tdSql.checkData(1, 1, 6) + tdSql.checkData(2, 1, 7) + + + os.system("rm -rf tools/taosdemoAllTest/TD-4985/query-limit-offset.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py new file mode 100644 index 0000000000000000000000000000000000000000..dfa829866d945b06d232aeeaba266b11ae229234 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py @@ -0,0 +1,703 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import random +import string +import os +import time +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +from util.dnodes import tdDnodes + +class TDTestCase: + updatecfgDict={'maxSQLLength':1048576} + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1538548685000 + self.num = 100 + + def get_random_string(self, length): + letters = string.ascii_lowercase + result_str = ''.join(random.choice(letters) for i in range(length)) + return result_str + + def run(self): + tdSql.prepare() + # test case for https://jira.taosdata.com:18080/browse/TD-5213 + + print("==============step1, regular table, 1 ts + 4094 cols + 1 binary==============") + startTime = time.time() + sql = "create table regular_table_1(ts timestamp, " + for i in range(4094): + sql += "col%d int, " % (i + 1) + sql += "col4095 binary(22))" + tdLog.info(len(sql)) + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into regular_table_1 values(%d, " + for j in range(4094): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_1") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from regular_table_1") + tdSql.checkRows(self.num) + tdSql.checkCols(4096) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into regular_table_1 
(ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_1") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from regular_table_1") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4096) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into regular_table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_1") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from regular_table_1") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4096) + + + print("==============step2,regular table error col or value==============") + tdLog.info('test regular table exceeds row num') + # column > 4096 + sql = "create table regular_table_2(ts timestamp, " + for i in range(4095): + sql += "col%d int, " % (i + 1) + sql += "col4096 binary(22))" + tdLog.info(len(sql)) + tdSql.error(sql) + + # column > 4096 + sql = "insert into regular_table_1 values(%d, " + for j in range(4095): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.error(sql) + + # insert column < 4096 + sql = "insert into regular_table_1 values(%d, " + for j in range(4092): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.error(sql) + + # alter column > 4096 + sql = "alter table regular_table_1 add column max int; " + tdSql.error(sql) + + print("==============step3,regular table , mix data type==============") + startTime = time.time() + sql = "create table regular_table_3(ts timestamp, " + for i in range(2000): + sql += "col%d int, " % (i + 1) + for i in range(2000,4094): + sql += "col%d bigint, " % (i + 1) + sql += "col4095 binary(22))" + tdLog.info(len(sql)) + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into regular_table_3 values(%d, " + for j in range(4094): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_3") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from regular_table_3") + tdSql.checkRows(self.num) + tdSql.checkCols(4096) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + sql = "create table regular_table_4(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(4), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(10), " % (i + 1) + for i in range(4090,4094): + sql += "timestamp_%d timestamp, " % (i + 1) + sql += 
"col4095 binary(22))" + tdLog.info(len(sql)) + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into regular_table_4 values(%d, " + for j in range(500): + str = "'%s', " % random.randint(-2147483647,2147483647) + sql += str + for j in range(500,1000): + str = "'%s', " % random.randint(-32767,32767 ) + sql += str + for j in range(1000,1500): + str = "'%s', " % random.randint(-127,127) + sql += str + for j in range(1500,2000): + str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700) + sql += str + for j in range(2000,2500): + str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true','false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + for j in range(4090,4094): + str = "%s, " % (self.ts + j) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(self.num) + tdSql.checkCols(4096) + tdLog.info("end ,now new one") + + #insert null value + tdLog.info('test insert null value') + for i in range(self.num): + sql = "insert into regular_table_4 values(%d, " + for j in range(2500): + str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ]) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true' ,'false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + for j in range(4090,4094): + str = "%s, " % (self.ts + j) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 10000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4096) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into regular_table_4 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4096) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into regular_table_4 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4095) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + 
time.sleep(1) + tdSql.query("select count(*) from regular_table_4") + tdSql.checkData(0, 0, 4*self.num) + tdSql.query("select * from regular_table_4") + tdSql.checkRows(4*self.num) + tdSql.checkCols(4096) + + #define TSDB_MAX_BYTES_PER_ROW 49151[old:1024 && 16384] + #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset] + tdLog.info('test regular_table max bytes per row 49151') + sql = "create table regular_table_5(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + for i in range(4090,4094): + sql += "timestamp_%d timestamp, " % (i + 1) + sql += "col4095 binary(69))" + tdSql.execute(sql) + tdSql.query("select * from regular_table_5") + tdSql.checkCols(4096) + # TD-5324 + sql = "alter table regular_table_5 modify column col4095 binary(70); " + tdSql.error(sql) + + # drop and add + sql = "alter table regular_table_5 drop column col4095; " + tdSql.execute(sql) + sql = "select * from regular_table_5; " + tdSql.query(sql) + tdSql.checkCols(4095) + sql = "alter table regular_table_5 add column col4095 binary(70); " + tdSql.error(sql) + sql = "alter table regular_table_5 add column col4095 binary(69); " + tdSql.execute(sql) + sql = "select * from regular_table_5; " + tdSql.query(sql) + tdSql.checkCols(4096) + + #out TSDB_MAX_BYTES_PER_ROW 49151 + tdLog.info('test regular_table max bytes per row out 49151') + sql = "create table regular_table_6(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + for i in range(4090,4094): + sql += "timestamp_%d timestamp, " % (i + 1) + sql += "col4095 binary(70))" + tdLog.info(len(sql)) + tdSql.error(sql) + + + print("==============step4, super table , 1 ts + 4090 cols + 4 tags ==============") + startTime = time.time() + sql = "create stable stable_1(ts timestamp, " + for i in range(4090): + sql += "col%d int, " % (i + 1) + sql += "col4091 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdLog.info(len(sql)) + tdSql.execute(sql) + sql = '''create table table_0 using stable_1 + tags('table_0' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_0 values(%d, " + for j in range(4090): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_0") + tdSql.checkData(0, 0, 
self.num) + tdSql.query("select * from table_0") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + sql = '''create table table_1 using stable_1 + tags('table_1' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_1 values(%d, " + for j in range(2080): + sql += "'%d', " % random.randint(0,1000) + for j in range(2080,4080): + sql += "'%s', " % 'NULL' + for j in range(4080,4090): + sql += "'%s', " % random.randint(0,10000) + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_1") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_1") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into table_1 (ts,col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from table_1") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from table_1") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4092) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into table_1 (ts,col123,col2213,col331,col41,col523,col236,col71,col813,col912,col1320,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,1000) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from table_1") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from table_1") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4092) + + print("==============step5,stable table , mix data type==============") + sql = "create stable stable_3(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(4), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(10), " % (i + 1) + sql += "col4091 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdLog.info(len(sql)) + tdSql.execute(sql) + sql = '''create table table_30 using stable_3 + tags('table_30' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_30 values(%d, " + for j in range(500): + str = "'%s', " % random.randint(-2147483647,2147483647) + sql += str + for j in range(500,1000): + str = "'%s', " % random.randint(-32767,32767 ) + sql += str + for j in range(1000,1500): + str = "'%s', " % random.randint(-127,127) + sql += str + for j in range(1500,2000): + str = "'%s', " % random.randint(-922337203685477580700,922337203685477580700) + sql += str + for j in range(2000,2500): + str = "'%s', " % random.randint(-92233720368547758070,92233720368547758070) + sql += str + for j in range(2500,3000): 
+ str = "'%s', " % random.choice(['true','false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_30") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_30") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + #insert null value + tdLog.info('test insert null value') + sql = '''create table table_31 using stable_3 + tags('table_31' , '1' , '2' , '3' );''' + tdSql.execute(sql) + + for i in range(self.num): + sql = "insert into table_31 values(%d, " + for j in range(2500): + str = "'%s', " % random.choice(['NULL' ,'NULL' ,'NULL' ,1 , 10 ,100 ,-100 ,-10, 88 ,66 ,'NULL' ,'NULL' ,'NULL' ]) + sql += str + for j in range(2500,3000): + str = "'%s', " % random.choice(['true' ,'false']) + sql += str + for j in range(3000,3500): + str = "'%s', " % random.randint(-9223372036854775807,9223372036854775807) + sql += str + for j in range(3500,3800): + str = "'%s', " % self.get_random_string(4) + sql += str + for j in range(3800,4090): + str = "'%s', " % self.get_random_string(10) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i)) + time.sleep(1) + tdSql.query("select count(*) from table_31") + tdSql.checkData(0, 0, self.num) + tdSql.query("select * from table_31") + tdSql.checkRows(self.num) + tdSql.checkCols(4092) + + #insert in order + tdLog.info('test insert in order') + for i in range(self.num): + sql = "insert into table_31 (ts,int_2,int_22,int_169,smallint_537,smallint_607,tinyint_1030,tinyint_1491,double_1629,double_1808,float_2075,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 1000)) + time.sleep(1) + tdSql.query("select count(*) from table_31") + tdSql.checkData(0, 0, 2*self.num) + tdSql.query("select * from table_31") + tdSql.checkRows(2*self.num) + tdSql.checkCols(4092) + + #insert out of order + tdLog.info('test insert out of order') + for i in range(self.num): + sql = "insert into table_31 (ts,int_169,float_2075,int_369,tinyint_1491,tinyint_1030,float_2360,smallint_537,double_1808,double_1608,double_1629,col4091) values(%d, " + for j in range(10): + str = "'%s', " % random.randint(0,100) + sql += str + sql += "'%s')" % self.get_random_string(22) + tdSql.execute(sql % (self.ts + i + 2000)) + time.sleep(1) + tdSql.query("select count(*) from table_31") + tdSql.checkData(0, 0, 3*self.num) + tdSql.query("select * from table_31") + tdSql.checkRows(3*self.num) + tdSql.checkCols(4092) + + #define TSDB_MAX_BYTES_PER_ROW 49151 TSDB_MAX_TAGS_LEN 16384 + #ts:8\int:4\smallint:2\bigint:8\bool:1\float:4\tinyint:1\nchar:4*()+2[offset]\binary:1*()+2[offset] + tdLog.info('test super table max bytes per row 49151') + sql = "create table stable_4(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): 
+ sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + sql += "col4091 binary(101))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdSql.execute(sql) + sql = '''create table table_40 using stable_4 + tags('table_40' , '1' , '2' , '3' );''' + tdSql.execute(sql) + tdSql.query("select * from table_40") + tdSql.checkCols(4092) + tdSql.query("describe table_40") + tdSql.checkRows(4096) + + tdLog.info('test super table drop and add column or tag') + sql = "alter stable stable_4 drop column col4091; " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4095) + sql = "alter table stable_4 add column col4091 binary(102); " + tdSql.error(sql) + sql = "alter table stable_4 add column col4091 binary(101); " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4096) + + sql = "alter stable stable_4 drop tag tag_1; " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4095) + sql = "alter table stable_4 add tag tag_1 int; " + tdSql.execute(sql) + sql = "select * from stable_4; " + tdSql.query(sql) + tdSql.checkCols(4096) + sql = "alter table stable_4 add tag loc1 nchar(10); " + tdSql.error(sql) + + tdLog.info('test super table max bytes per row 49151') + sql = "create table stable_5(ts timestamp, " + for i in range(500): + sql += "int_%d int, " % (i + 1) + for i in range(500,1000): + sql += "smallint_%d smallint, " % (i + 1) + for i in range(1000,1500): + sql += "tinyint_%d tinyint, " % (i + 1) + for i in range(1500,2000): + sql += "double_%d double, " % (i + 1) + for i in range(2000,2500): + sql += "float_%d float, " % (i + 1) + for i in range(2500,3000): + sql += "bool_%d bool, " % (i + 1) + for i in range(3000,3500): + sql += "bigint_%d bigint, " % (i + 1) + for i in range(3500,3800): + sql += "nchar_%d nchar(20), " % (i + 1) + for i in range(3800,4090): + sql += "binary_%d binary(34), " % (i + 1) + sql += "col4091 binary(102))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdSql.error(sql) + + print("==============step6, super table error col ==============") + tdLog.info('test exceeds row num') + # column + tag > 4096 + sql = "create stable stable_2(ts timestamp, " + for i in range(4091): + sql += "col%d int, " % (i + 1) + sql += "col4092 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int) " + tdLog.info(len(sql)) + tdSql.error(sql) + + # column + tag > 4096 + sql = "create stable stable_2(ts timestamp, " + for i in range(4090): + sql += "col%d int, " % (i + 1) + sql += "col4091 binary(22))" + sql += " tags (loc nchar(10),tag_1 int,tag_2 int,tag_3 int,tag_4 int) " + tdLog.info(len(sql)) + tdSql.error(sql) + + # alter column + tag > 4096 + sql = "alter table stable_1 add column max int; " + tdSql.error(sql) + # TD-5322 + sql = "alter table stable_1 add tag max int; " + tdSql.error(sql) + # TD-5324 + sql = "alter table stable_4 modify column col4091 binary(102); " + tdSql.error(sql) + sql = "alter table stable_4 modify tag loc nchar(20); " + tdSql.query("select * from table_40") + tdSql.checkCols(4092) + tdSql.query("describe table_40") + tdSql.checkRows(4096) + + os.system("rm -rf tools/taosdemoAllTest/TD-5213/insert4096columns_not_use_taosdemo.py.sql") + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully 
executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv new file mode 100755 index 0000000000000000000000000000000000000000..5b30be5b4c4d5c323141097af6207ffb8bb93449 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv @@ -0,0 +1,3 @@ +1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 
636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 
1289, 1290, 1291, 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 
1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 
2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 
3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274, 3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318, 3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329, 3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 
3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091 
+1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL 
+1,2,3,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NUL
L,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,4,NULL,NULL,NULL,NULL,NULL,NULL,5,NULL,NULL,6,NULL,NULL,NULL,7,NULL,NULL,NULL,8,NULL,NULL,NULL,9,NULL,NULL,10 \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json new file mode 100755 index 0000000000000000000000000000000000000000..25af3a1041dbcd06319dd6abfeb82fd33240c013 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -0,0 +1,137 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 102400000, + "databases": [{ + "dbinfo": { + "name": "json", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 
8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb_old", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb_old_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv", + "tags_file": "", + "columns": [{"type": "INT","count":1000}, {"type": "BINARY", "len": 16, "count":20}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + },{ + "name": "stb_new", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb_new_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT","count":4000}, {"type": "BINARY", "len": 16, "count":90}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":3}] + },{ + "name": "stb_mix", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb_mix_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT","count":500},{"type": "SMALLINT","count":500},{"type": "TINYINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 20,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}], + "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}] + },{ + "name": "stb_excel", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb_excel_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "sample", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.csv", + "tags_file": "", + "columns": [{"type": 
"INT","count":500},{"type": "SMALLINT","count":500},{"type": "SMALLINT","count":500},{"type": "DOUBLE","count":500},{"type": "FLOAT","count":500},{"type": "BOOL","count":500},{"type": "BIGINT","count":500},{"type": "NCHAR","len": 19,"count":300},{"type": "BINARY","len": 34,"count":290},{"type": "BINARY","len": 101,"count":1}], + "tags": [{"type": "INT", "count":3}, {"type": "NCHAR", "len": 10, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py new file mode 100755 index 0000000000000000000000000000000000000000..eb844b6fe24338b0301c45b918967faec7debcc0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py @@ -0,0 +1,176 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + #-N:regular table -d:database name -t:table num -n:rows num per table -l:col num -y:force + #regular old && new + startTime = time.time() + os.system("%staosdemo -N -d regular_old -t 1 -n 10 -l 1023 -y" % binPath) + tdSql.execute("use regular_old") + tdSql.query("show tables;") + tdSql.checkRows(1) + tdSql.query("select * from d0;") + tdSql.checkCols(1024) + tdSql.query("describe d0;") + tdSql.checkRows(1024) + + os.system("%staosdemo -N -d regular_new -t 1 -n 10 -l 4095 -y" % binPath) + tdSql.execute("use regular_new") + tdSql.query("show tables;") + tdSql.checkRows(1) + tdSql.query("select * from d0;") + tdSql.checkCols(4096) + tdSql.query("describe d0;") + tdSql.checkRows(4096) + + #super table -d:database name -t:table num -n:rows num per table -l:col num -y:force + os.system("%staosdemo -d super_old -t 1 -n 10 -l 1021 -y" % binPath) + tdSql.execute("use super_old") + tdSql.query("show tables;") + tdSql.checkRows(1) + tdSql.query("select * from meters;") + tdSql.checkCols(1024) + tdSql.query("select * from d0;") + tdSql.checkCols(1022) + tdSql.query("describe meters;") + tdSql.checkRows(1024) + tdSql.query("describe d0;") + tdSql.checkRows(1024) + + os.system("%staosdemo -d super_new -t 1 -n 10 -l 4093 -y" % binPath) + tdSql.execute("use super_new") + tdSql.query("show 
tables;") + tdSql.checkRows(1) + tdSql.query("select * from meters;") + tdSql.checkCols(4096) + tdSql.query("select * from d0;") + tdSql.checkCols(4094) + tdSql.query("describe meters;") + tdSql.checkRows(4096) + tdSql.query("describe d0;") + tdSql.checkRows(4096) + tdSql.execute("create table stb_new1_1 using meters tags(1,2)") + tdSql.query("select * from stb_new1_1") + tdSql.checkCols(4094) + tdSql.query("describe stb_new1_1;") + tdSql.checkRows(4096) + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + # test case for https://jira.taosdata.com:18080/browse/TD-5213 + os.system("%staosdemo -f tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json -y " % binPath) + tdSql.execute("use json") + tdSql.query("select count (tbname) from stb_old") + tdSql.checkData(0, 0, 1) + + tdSql.query("select * from stb_old") + tdSql.checkRows(10) + tdSql.checkCols(1024) + + tdSql.query("select count (tbname) from stb_new") + tdSql.checkData(0, 0, 1) + + tdSql.query("select * from stb_new") + tdSql.checkRows(10) + tdSql.checkCols(4096) + tdSql.query("describe stb_new;") + tdSql.checkRows(4096) + tdSql.query("select * from stb_new_0") + tdSql.checkRows(10) + tdSql.checkCols(4091) + tdSql.query("describe stb_new_0;") + tdSql.checkRows(4096) + tdSql.execute("create table stb_new1_1 using stb_new tags(1,2,3,4,5)") + tdSql.query("select * from stb_new1_1") + tdSql.checkCols(4091) + tdSql.query("describe stb_new1_1;") + tdSql.checkRows(4096) + + tdSql.query("select count (tbname) from stb_mix") + tdSql.checkData(0, 0, 1) + + tdSql.query("select * from stb_mix") + tdSql.checkRows(10) + tdSql.checkCols(4096) + tdSql.query("describe stb_mix;") + tdSql.checkRows(4096) + tdSql.query("select * from stb_mix_0") + tdSql.checkRows(10) + tdSql.checkCols(4092) + tdSql.query("describe stb_mix_0;") + tdSql.checkRows(4096) + + tdSql.query("select count (tbname) from stb_excel") + tdSql.checkData(0, 0, 1) + + tdSql.query("select * from stb_excel") + tdSql.checkRows(10) + tdSql.checkCols(4096) + tdSql.query("describe stb_excel;") + tdSql.checkRows(4096) + tdSql.query("select * from stb_excel_0") + tdSql.checkRows(10) + tdSql.checkCols(4092) + tdSql.query("describe stb_excel_0;") + tdSql.checkRows(4096) + endTime = time.time() + print("total time %ds" % (endTime - startTime)) + + + os.system("rm -rf tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.py.sql") + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/convertResFile.py b/tests/pytest/tools/taosdemoAllTest/convertResFile.py index 52bb8f40d0f0a5a55450ecb4927067f37f862499..5ed2fec13b1e0722937023d829e5e9b9fa1ad623 100644 --- a/tests/pytest/tools/taosdemoAllTest/convertResFile.py +++ b/tests/pytest/tools/taosdemoAllTest/convertResFile.py @@ -2,6 +2,14 @@ from datetime import datetime import time import os +# class FileSeparaSpaceConvertcomma: +# def __init__(self): +# self.inputfile = "" +# self.oputfile = "" +# self.affectedRows = 0 + +# def ConvertFile(self, inputfile,): + os.system("awk -v OFS=',' '{$1=$1;print$0}' ./all_query_res0.txt > ./new_query_res0.txt") with open('./new_query_res0.txt','r+') as f0: contents = f0.readlines() diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json index f2dca662fddc5991a9dcdb8371dc0e4086868190..0ae3a7194f8320b3919f850e19861f7796d2a5cc 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -8,7 +8,7 @@ "thread_count": 4, "thread_count_create_tbl": 4, "result_file":"./insert_res.txt", - "confirm_parameter_prompt": "no", + "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 10, "num_of_records_per_req": 1000, diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json similarity index 78% rename from tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json rename to tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json index 55be0198916e3737d185deaa231885fbfa607c66..4f31351516e927b4ec7638540c0aca70ed54c022 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json +++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json @@ -71,7 +71,7 @@ "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", - "interlace_rows": 1000000, + "interlace_rows": 0, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, @@ -84,11 +84,37 @@ "columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, { "name": "stb2", "child_table_exists":"no", "childtable_count": 1, - "childtable_prefix": "stb01_", + "childtable_prefix": "stb02_", "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", @@ -97,7 +123,7 @@ "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", - "interlace_rows": 1000000, + "interlace_rows": 0, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, @@ -111,10 +137,10 @@ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }, { - "name": "stb3", + "name": "stb4", "child_table_exists":"no", "childtable_count": 1, - "childtable_prefix": "stb01_", + "childtable_prefix": "stb04_", "auto_create_table": "no", "batch_create_tbl_num": 12, "data_source": "rand", @@ -123,7 +149,7 @@ "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", - "interlace_rows": 1000000, + "interlace_rows": 100, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, @@ -133,7 +159,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "BINARY", "len": 16371, "count":1},{"type": "INT"}], + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6},{"type": "TINYINT"}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] diff --git 
a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json new file mode 100644 index 0000000000000000000000000000000000000000..d9ac2072f1fb5f29f7b5e6540d20d04837e461c2 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json similarity index 95% rename from tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json rename to tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json index 42461b2f6fba85093a6a45883608b49277669568..e5e31f75ef2e7ede4a8d1eb202c298c6952559e4 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json @@ -55,7 +55,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] }] }] diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json similarity index 98% rename from tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json rename to tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 42f6ef2f2fe90f7eac23778542475f152794a509..1b56830189623d344168918f239887c3359b2645 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum1024.json +++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -45,7 +45,7 @@ 
"childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, + "interlace_rows": 1000, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json new file mode 100644 index 0000000000000000000000000000000000000000..91234d5e48af891c4dfd0fdfd88121e123bf4edc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertRestful.json b/tests/pytest/tools/taosdemoAllTest/insertRestful.json new file mode 100644 index 0000000000000000000000000000000000000000..d05e1c249f25c17c37e40626bf0d3c5a96e5fffe --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/insertRestful.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + 
"super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json similarity index 96% rename from tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json rename to tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json index 7c12a62764ecd129342d916092cf732fe202151f..f1aa981508f063adccd4cf2f5c6166a16deb9a23 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum1024.json +++ b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json @@ -55,7 +55,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "DOUBLE", "count":1024}], + "columns": [{"type": "DOUBLE", "count":4096}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] }] }] diff --git a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json index 5cf8114472e00d5ebc90b5dc762f22f9698f7d76..88218b4989d5e01178142aa9acf2332b34718826 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json +++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json @@ -11,8 +11,8 @@ "confirm_parameter_prompt": "no", "insert_interval": 0, "interlace_rows": 10, - "num_of_records_per_req": 100, - "max_sql_len": 10240000000, + "num_of_records_per_req": 1000000, + "max_sql_len": 1024000000, "databases": [{ "dbinfo": { "name": "db1", @@ -45,7 +45,7 @@ "childtable_limit": 0, "childtable_offset":0, "multi_thread_write_one_tbl": "no", - "interlace_rows": 0, + "interlace_rows": 10000, "insert_interval":0, "max_sql_len": 1024000, "disorder_ratio": 0, diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py index 
703f755c31c7b325e34b93878e2e3175648834ef..077ced5d02c792b1c3344ea3e8b129038652b4b8 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-insert-offset.py @@ -60,7 +60,7 @@ class TDTestCase: tdSql.checkData(0, 0, 1000000) os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/taosdemoTestWithJson-1.py.sql") + os.system("rm -rf tools/taosdemoAllTest/moredemo-insert-offset.py.sql") def stop(self): diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreads0.json b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json new file mode 100644 index 0000000000000000000000000000000000000000..69557a784180acec3c6de059b9285df4d4b31456 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/querrThreads0.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times":3, + "specified_table_query": { + "query_interval": 0, + "concurrent": 1, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval":0, + "threads": 0, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json new file mode 100644 index 0000000000000000000000000000000000000000..9074ae8fd1049d2dbaedfff881feefd84583ca20 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/querrThreadsless0.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times":3, + "specified_table_query": { + "query_interval": 0, + "concurrent": 1, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval":0, + "threads": -1, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json b/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json new file mode 100644 index 0000000000000000000000000000000000000000..fd047dec9497c64f8b8f4300617fcc90563b67bc --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/queryConcurrent0.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times":3, + "specified_table_query": { + "query_interval": 0, + "concurrent": 0, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval":0, + "threads": 1, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of 
file diff --git a/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json b/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json new file mode 100644 index 0000000000000000000000000000000000000000..96a54cfd09cda09f8a9ebed169527c13092c7d57 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/queryConcurrentless0.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times":3, + "specified_table_query": { + "query_interval": 0, + "concurrent": -1, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval":0, + "threads": 1, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json similarity index 89% rename from tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json rename to tests/pytest/tools/taosdemoAllTest/queryInsertdata.json index 79471be2044d3ea7c637b4b1e500cfcc8e6413a9..99138e36668971ee2e9aa0656b2ee76f262723e3 100644 --- a/tests/pytest/tools/taosdemoAllTest/speciQueryInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json @@ -35,7 +35,7 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 100, + "childtable_count": 10, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 10, @@ -54,13 +54,13 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }, { "name": "stb1", "child_table_exists":"no", - "childtable_count": 100, + "childtable_count": 10, "childtable_prefix": "stb01_", "auto_create_table": "no", "batch_create_tbl_num": 10, @@ -79,7 +79,7 @@ "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json new file mode 100644 index 0000000000000000000000000000000000000000..747f7b3c7e9ebb5720cae98811e136ece74d47e2 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + 
"num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 5, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0 , + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 1, "count":3}, {"type": "BINARY", "len": 2, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/queryQps.json b/tests/pytest/tools/taosdemoAllTest/queryQps.json index 67a1cf3eb39c045192b5d35f698e38506777cef2..7ebad5e2b2f5af687656c8eed041579d7de1e2c2 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryQps.json +++ b/tests/pytest/tools/taosdemoAllTest/queryQps.json @@ -9,8 +9,8 @@ "databases": "db", "query_times": 1, "specified_table_query": { - "query_interval": 0, - "concurrent": 1, + "query_interval": 10000, + "concurrent": 4, "sqls": [ { "sql": "select last_row(*) from stb00_0", @@ -24,8 +24,8 @@ }, "super_table_query": { "stblname": "stb1", - "query_interval":0, - "threads": 1, + "query_interval":20000, + "threads": 4, "sqls": [ { "sql": "select last_row(ts) from xxxx", diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json b/tests/pytest/tools/taosdemoAllTest/queryRestful.json similarity index 100% rename from tests/pytest/tools/taosdemoAllTest/speciQueryRestful.json rename to tests/pytest/tools/taosdemoAllTest/queryRestful.json diff --git a/tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json b/tests/pytest/tools/taosdemoAllTest/queryTaosc.json similarity index 100% rename from tests/pytest/tools/taosdemoAllTest/speciQueryTaosc.json rename to tests/pytest/tools/taosdemoAllTest/queryTaosc.json diff --git a/tests/pytest/tools/taosdemoAllTest/queryTimes0.json b/tests/pytest/tools/taosdemoAllTest/queryTimes0.json new file mode 100644 index 0000000000000000000000000000000000000000..63a13587728fa797a65794994c04378edb87a0c5 --- /dev/null +++ 
b/tests/pytest/tools/taosdemoAllTest/queryTimes0.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 0, + "specified_table_query": { + "query_interval": 0, + "concurrent": 1, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval":0, + "threads": 1, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json b/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json new file mode 100644 index 0000000000000000000000000000000000000000..039f7e10603cd2d06608cb24a2cb72356bd50728 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/queryTimesless0.json @@ -0,0 +1,37 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": -1, + "specified_table_query": { + "query_interval": 0, + "concurrent": 1, + "sqls": [ + { + "sql": "select last_row(*) from stb00_0", + "result": "./query_res0.txt" + }, + { + "sql": "select last_row(*) from stb00_99 ", + "result": "./query_res1.txt" + + }] + }, + "super_table_query": { + "stblname": "stb1", + "query_interval":0, + "threads": 1, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "./query_res2.txt" + } + ] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..b3e1024647ff14d0a4a47759e0c9aceab0ac5240 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 100, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, 
"count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1000, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 200, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..26d483f57da2c30c7ab5d466f6b0b2cb3e5450b0 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", 
"len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..b1cd882bbf38545d1a3e7d4999fc4f6e0d5c4025 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 10, + "disorder_range": 100, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 100, + "disorder_range": 1, + "timestamp_step": 1000, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..e541d663fc9f884a7206592271d5124da7746793 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json @@ -0,0 +1,181 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 
10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "NN123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "NNN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "NNY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "NY123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "NYN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "NYY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + 
"interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + } + ] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..f32d44240d7f5b717013878358e5d4db378ba354 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json @@ -0,0 +1,181 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 100, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "YN123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "YNN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"no", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "YNY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "123", + "childtable_count": 20, + "childtable_prefix": "YY123_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + 
"insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "no", + "childtable_count": 20, + "childtable_prefix": "YYN_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + },{ + "name": "stb", + "child_table_exists":"yes", + "auto_create_table": "yes", + "childtable_count": 20, + "childtable_prefix": "YYY_", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 5, + "childtable_limit": 40, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "now", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "TINYINT"}] + } + ] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..c9d93c2423612b3fb4c6ab1f2b5d577f3c64e8cd --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 20, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 150, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 151, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json 
b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..7f94fa2e75b930489dc0106d1796df06af43967f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 100, + "interlace_rows": 0, + "num_of_records_per_req": 2000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval": 200, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..339a2555c87f01b8ec6ce84f018dd4787f39d7fd --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + 
"minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + 
"childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..7e39ddbc0d6233c23d3eb9d5f34e9f0cc6a64360 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", 
+ "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 30, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-12-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..e83a04003324149803f040e61fa6750a20b2afbb --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbno", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + 
"child_table_exists":"no", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..9502358de0e1eb92730dd6782d21bcaba4f67af5 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"yes", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + 
"child_table_exists":"yes", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset":7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..5a500a12580e2fbe9aca206f962304f3310adb3f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 1 + }, + "super_tables": [{ + "name": "stb0", + 
"child_table_exists":"yes", + "childtable_count": 5, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 6, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 7, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 4, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 2, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + 
"insert_rows": 20, + "childtable_limit": 0, + "childtable_offset": 7, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":6}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..c3f11bf03dad7b7bbc25e2af16488bbd0719bf02 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "dbtest123", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset": 0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./tools/taosdemoAllTest/sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 10, + "childtable_limit": -1, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "./tools/taosdemoAllTest/tags.csv", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json 
b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..d2143366d7c3928495d5a4ef6f83edb5014670f4 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file":"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count":20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 20, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "start_timestamp": "2020-11-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..c6909c6278cdbc6fd85eea04fb7e4e859f6df5cd --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json @@ -0,0 +1,166 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10240000000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + 
"cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16374, "count":1}], + "tags": [{"type": "TINYINT", "count":12}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16370, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb03_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb02_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16375, "count":1},{"type": "INT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb04_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + 
"multi_thread_write_one_tbl": "no", + "interlace_rows": 100, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 16371, "count":3},{"type": "INT","count":6},{"type": "TINYINT"}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..a5cc009ffb4a5f769d63b8fc4ad1d74f04a76c4b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 0, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..d9678a58692af75e06c77451028151658f812a77 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + 
"thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 10, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": -1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..a448750f74b5ad7219c5f29d744729777f497053 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + 
"disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 5, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..4ec18c49d6c4614f55947d5ab3b9d9a9a84579af --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":0}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..c9dad3dc7f95a7b95682621103c945dff395d3b5 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 1000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 
10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1004}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..00c346678f884a06a0611116ad13e47117bad59f --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 50000, + "num_of_records_per_req": 50000, + "max_sql_len": 1025000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 100, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 100, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows":50000, + "childtable_limit": -1, + "childtable_offset":0, + "interlace_rows": 32767, + "insert_interval":0, + "max_sql_len": 1025000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2012-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "TINYINT", "count":1}], + "tags": [{"type": "TINYINT", "count":1}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..4e47b3b404847a267f47413f6ab297e35cc84b0b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + 
"user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 0, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..28e7bbb39bb5d2477842129936ed6584e617e25a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json @@ -0,0 +1,88 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": -1, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 1, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, 
+ "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":1}, {"type": "BINARY", "len": 16, "count":2}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 2, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 12, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 2, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..39e38afefd7060b6c6a0241521029e84816b999b --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 100, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "DOUBLE", "count":4096}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..f219d3c7a57146a075599eff495ffe93533373ef --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + 
"insert_interval": 0, + "interlace_rows": 10, + "num_of_records_per_req": 1000000, + "max_sql_len": 1024000000, + "databases": [{ + "dbinfo": { + "name": "db1", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 10000, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1}, {"type": "BIGINT", "count":1}, {"type": "float", "count":1}, {"type": "double", "count":1}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":127}, {"type": "BINARY", "len": 16, "count":2}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..ed3eb280f6869bed76de72bdf50b646bca4a245a --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json @@ -0,0 +1,65 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "databases": [ + { + "dbinfo": { + "name": "blf", + "drop": "yes" + }, + "super_tables": [ + { + "name": "p_0_topics", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "p_0_topics_", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 525600, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 1000, + "max_sql_len": 1048576, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 60000, + "start_timestamp": "2019-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + { + "type": "INT", + "count": 1 + }, + { + "type": "FLOAT", + "count": 1 + }, + { + "type": "BINARY", + "len": 12, + "count": 1 + } + ], + "tags": [ + { + "type": "BINARY", + "len": 12, + "count": 10 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json new file mode 100644 index 0000000000000000000000000000000000000000..2105398d55b80f14f2fcfcd08f752333e27c031c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json @@ -0,0 +1,62 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": 
"./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 10000, + "num_of_records_per_req": 10000, + "max_sql_len": 10240000000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 50, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 10, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "insert_rows": 1000, + "childtable_limit": 0, + "childtable_offset":0, + "multi_thread_write_one_tbl": "no", + "interlace_rows": 0, + "insert_interval":0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":1005}, {"type": "BINARY", "len": 16, "count":3075}, {"type": "BINARY", "len": 32, "count":6}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":7}] + }] + }] +} diff --git a/tests/pytest/tools/taosdemoAllTest/subAsync.json b/tests/pytest/tools/taosdemoAllTest/subAsync.json new file mode 100644 index 0000000000000000000000000000000000000000..67a3bf5aab85bc540b4b891039ba59960ff3f4b1 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subAsync.json @@ -0,0 +1,45 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"async", + "interval":0, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select ts from stb00_1", + "result": "./subscribe_res1.txt" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"async", + "interval":0, + "restart":"yes", + "keepProgress":"yes", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }, + { + "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ", + "result": "./subscribe_res3.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json index 7d14d0ad4b888fc099becb176e84af54bb769f50..1f9d794990dcbc0daaee2076f2ae6dfd1249b132 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json @@ -35,26 +35,26 @@ "super_tables": [{ "name": "stb0", "child_table_exists":"no", - "childtable_count": 1, + "childtable_count": 2, "childtable_prefix": "stb00_", "auto_create_table": "no", "batch_create_tbl_num": 10, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 1, + "insert_rows": 10, "childtable_limit": 0, "childtable_offset": 0, "interlace_rows": 0, "insert_interval": 0, "max_sql_len": 1024000, "disorder_ratio": 0, - "disorder_range": 0, + "disorder_range": 1000, "timestamp_step": 1000, "start_timestamp": "2021-02-25 10:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", "tags_file": "", - "columns": [{"type": "BINARY", 
"len":50, "count":1}, {"type": "BINARY", "len": 16, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] }] }] diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json new file mode 100644 index 0000000000000000000000000000000000000000..d5d0578f07526c18d541391597a3236c99f27544 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json @@ -0,0 +1,86 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 0, + "num_of_records_per_req": 3000, + "max_sql_len": 1024000, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 365, + "minRows": 100, + "maxRows": 4096, + "comp":2, + "walLevel":1, + "cachelast":0, + "quorum":1, + "fsync":3000, + "update": 0 + }, + "super_tables": [{ + "name": "stb0", + "child_table_exists":"no", + "childtable_count": 200, + "childtable_prefix": "stb00_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-02-25 10:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }, + { + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 20, + "childtable_prefix": "stb01_", + "auto_create_table": "no", + "batch_create_tbl_num": 1000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 10, + "childtable_limit": 0, + "childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1000, + "start_timestamp": "2021-02-25 10:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 1, "count":1}, {"type": "BINARY", "len": 3, "count":1}, {"type": "INT"}, {"type": "DOUBLE", "count":1}], + "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}] + }] + }] +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subSync.json b/tests/pytest/tools/taosdemoAllTest/subSync.json new file mode 100644 index 0000000000000000000000000000000000000000..aa0b2cd7a4b454fd3332d72a521d244b5e567869 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subSync.json @@ -0,0 +1,45 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + 
"concurrent":2, + "mode":"sync", + "interval":0, + "restart":"no", + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select ts from stb00_1", + "result": "./subscribe_res1.txt" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":10000, + "restart":"no", + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }, + { + "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ", + "result": "./subscribe_res3.txt" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json b/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json new file mode 100644 index 0000000000000000000000000000000000000000..625e4792cfa113166e0ba5b0ef068bb2109bf027 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subSyncResFileNull.json @@ -0,0 +1,49 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":2, + "mode":"sync", + "interval":0, + "restart":"no", + "keepProgress":"no", + "resubAfterConsume":-1, + "endAfterConsume":1, + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "" + }, + { + "sql": "select ts from stb00_1", + "result": "" + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":10000, + "restart":"no", + "keepProgress":"no", + "resubAfterConsume":-1, + "endAfterConsume":1, + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "" + }, + { + "sql": "select * from xxxx where ts > '2021-02-25 10:00:04.000' ", + "result": "" + }] + } + } \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json new file mode 100644 index 0000000000000000000000000000000000000000..6b2828822e4a35989ff9c0d69a469bb34a0e7a84 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100.json @@ -0,0 +1,439 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":1, + "mode":"sync", + "interval":0, + "restart":"no", + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb00_1", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_2", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_3", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_4", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_5", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_6", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_7", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_8", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_9", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_10 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_11 ", + "result": 
"./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_12 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_13 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_14 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_15 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_16 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_17 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_18 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_19 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_20 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_21 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_22 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_23 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_24 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_25 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_26 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_27 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_28 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_29 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_30 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_31 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_32 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_33 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_34 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_35 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_36 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_37 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_38 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_39 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_40 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_41 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_42 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_43 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_44 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_45 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_46 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_47 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_48 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_49 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_50 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_51 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_52 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_53 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_54 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_55 ", + "result": "./subscribe_res0.txt" + }, + { + 
"sql": "select * from stb00_56 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_57 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_58 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_59 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_60", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_61", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_62", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_63", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_64", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_65", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_66", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_67", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_68", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_69", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_70 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_71 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_72 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_73 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_74 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_75 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_76 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_77 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_78 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_79 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_80 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_81 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_82 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_83 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_84 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_85 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_86 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_87 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_88 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_89 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_90 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_91 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_92 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_93 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_94 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_95 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_96 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_97 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_98 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_99 ", + "result": "./subscribe_res0.txt" + + }, + { + "sql": "select * from stb00_99 ", + 
"result": "./subscribe_res0.txt" + + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":0, + "restart":"no", + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json new file mode 100644 index 0000000000000000000000000000000000000000..c45a9ea48a147ae96256de60ab6d1f9c579f1431 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subSyncSpecMaxsql100Async.json @@ -0,0 +1,439 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":1, + "mode":"async", + "interval":0, + "restart":"no", + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from stb00_0", + "result": "./subscribe_res1.txt" + }, + { + "sql": "select * from stb00_1", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_2", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_3", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_4", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_5", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_6", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_7", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_8", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_9", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_10 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_11 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_12 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_13 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_14 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_15 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_16 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_17 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_18 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_19 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_20 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_21 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_22 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_23 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_24 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_25 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_26 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_27 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_28 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_29 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_30 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_31 ", + "result": 
"./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_32 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_33 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_34 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_35 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_36 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_37 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_38 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_39 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_40 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_41 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_42 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_43 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_44 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_45 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_46 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_47 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_48 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_49 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_50 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_51 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_52 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_53 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_54 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_55 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_56 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_57 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_58 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_59 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_60", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_61", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_62", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_63", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_64", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_65", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_66", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_67", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_68", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_69", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_70 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_71 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_72 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_73 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_74 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_75 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": 
"select * from stb00_76 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_77 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_78 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_79 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_80 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_81 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_82 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_83 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_84 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_85 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_86 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_87 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_88 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_89 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_90 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_91 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_92 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_93 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_94 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_95 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_96 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_97 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_98 ", + "result": "./subscribe_res0.txt" + }, + { + "sql": "select * from stb00_99 ", + "result": "./subscribe_res0.txt" + + }, + { + "sql": "select * from stb00_99 ", + "result": "./subscribe_res0.txt" + + }] + }, + "super_table_query": + { + "stblname": "stb0", + "threads":2, + "mode":"sync", + "interval":0, + "restart":"no", + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:01.000' ", + "result": "./subscribe_res2.txt" + }] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json new file mode 100644 index 0000000000000000000000000000000000000000..3214d35bf04aa3b66b336734469539f42ea50c4c --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100.json @@ -0,0 +1,426 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "super_table_query": + { + "stblname": "stb1", + "threads":4, + "mode":"sync", + "interval":0, + "restart":"no", + "resubAfterConsume":-1, + "endAfterConsume":1, + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res2.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 
10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + 
"result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": 
"./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": 
"./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json new file mode 100644 index 0000000000000000000000000000000000000000..075ec9cf5dc5fc75da2da4cde8a9358799af7cb9 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subSyncSuperMaxsql100Async.json @@ -0,0 +1,426 @@ +{ + "filetype":"subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "super_table_query": + { + "stblname": "stb1", + "threads":4, + "mode":"async", + "interval":0, + "restart":"no", + "resubAfterConsume":-1, + "endAfterConsume":1, + "keepProgress":"no", + "sqls": [ + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res2.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + 
}, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": 
"select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx 
where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }, + { + "sql": "select * from xxxx where ts >= '2021-02-25 10:00:00.000' ", + "result": "./subscribe_res3.txt" + }] + } +} \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/sub_no_result.json b/tests/pytest/tools/taosdemoAllTest/sub_no_result.json new file mode 100644 index 0000000000000000000000000000000000000000..cdf7c2314ede28e9c3ccaa9d53864737ff3fac96 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/sub_no_result.json @@ -0,0 +1,25 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": + { + "concurrent":1, + "mode":"sync", + "interval": 0, + "restart":"yes", + "keepProgress":"no", + "endAfterConsume": 1100000, + "sqls": [ + { + "sql": "select * from st;", + "result": "" + }] + } + } + \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py new file mode 100644 index 0000000000000000000000000000000000000000..270eea17cb6c913719fb67c4b8f33065b0a0445d --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/subscribeNoResult.py @@ -0,0 +1,82 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import _thread + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1601481600000 + self.numberOfRecords = 1100000 + + def execCmdAndGetOutput(self, cmd): + r = os.popen(cmd) + text = r.read() + r.close() + return text + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + tdSql.prepare() + tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)") + tdSql.execute("create table t1 using st tags(0)") + currts = self.ts + finish = 0 + while(finish < self.numberOfRecords): + sql = "insert into t1 values" + for i in range(finish, self.numberOfRecords): + sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i) + finish = i + 1 + if (1048576 - len(sql)) < 16384: + break + tdSql.execute(sql) + + binPath = buildPath+ "/build/bin/" + + os.system("%staosdemo -f tools/taosdemoAllTest/sub_no_result.json -g 2>&1 | tee sub_no_result.log" % binPath) + test_line = int(self.execCmdAndGetOutput("cat sub_no_result.log | wc -l")) + if(test_line < 1100024): + tdLog.exit("failed test subscribeNoResult: %d != expected(1100024)" % test_line) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py index 638a9c49b9b8cfe0864e4a158d3bb9ffe0b7985f..4d80328c289921959303e6610888b4b1a20411dc 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py @@ -23,7 +23,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -39,7 +39,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,7 +48,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - # insert: create one or mutiple tables per sql and insert multiple rows per sql + # insert: create one or mutiple tables per sql and insert multiple rows per sql os.system("%staosdemo -f 
tools/taosdemoAllTest/insert-1s1tnt1r.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -62,10 +62,26 @@ class TDTestCase: tdSql.query("select count(*) from stb01_1") tdSql.checkData(0, 0, 200) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 200000) + tdSql.checkData(0, 0, 200000) + + # restful connector insert data + os.system("%staosdemo -f tools/taosdemoAllTest/insertRestful.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb01_1") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 200) - # insert: create mutiple tables per sql and insert one rows per sql . + # insert: create mutiple tables per sql and insert one rows per sql . os.system("%staosdemo -f tools/taosdemoAllTest/insert-1s1tntmr.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") @@ -73,34 +89,34 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 10000) + tdSql.checkData(0, 0, 10000) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 100000) + tdSql.checkData(0, 0, 100000) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400000) + tdSql.checkData(0, 0, 400000) - # insert: using parament "insert_interval to controls spped of insert. + # insert: using parament "insert_interval to controls spped of insert. # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 os.system("%staosdemo -f tools/taosdemoAllTest/insert-interval-speed.json -y" % binPath) tdSql.execute("use db") tdSql.query("show stables") tdSql.checkData(0, 4, 100) tdSql.query("select count(*) from stb00_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 2000000) + tdSql.checkData(0, 0, 2000000) tdSql.query("show stables") tdSql.checkData(1, 4, 100) tdSql.query("select count(*) from stb01_0") - tdSql.checkData(0, 0, 20000) + tdSql.checkData(0, 0, 20000) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 2000000) - + tdSql.checkData(0, 0, 2000000) + # spend 2min30s for 3 testcases. 
# insert: drop and child_table_exists combination test - # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset + # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset os.system("%staosdemo -f tools/taosdemoAllTest/insert-nodbnodrop.json -y" % binPath) tdSql.error("show dbno.stables") os.system("%staosdemo -f tools/taosdemoAllTest/insert-newdb.json -y" % binPath) @@ -112,41 +128,41 @@ class TDTestCase: tdSql.query("select count (tbname) from stb2") tdSql.checkData(0, 0, 7) tdSql.query("select count (tbname) from stb3") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) tdSql.query("select count (tbname) from stb4") - tdSql.checkData(0, 0, 8) + tdSql.checkData(0, 0, 8) os.system("%staosdemo -f tools/taosdemoAllTest/insert-offset.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 240) + tdSql.checkData(0, 0, 240) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 220) + tdSql.checkData(0, 0, 220) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 180) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 160) os.system("%staosdemo -f tools/taosdemoAllTest/insert-newtable.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 150) + tdSql.checkData(0, 0, 150) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 360) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 360) + tdSql.checkData(0, 0, 360) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 340) tdSql.query("select count(*) from stb4") tdSql.checkData(0, 0, 400) os.system("%staosdemo -f tools/taosdemoAllTest/insert-renewdb.json -y" % binPath) - tdSql.execute("use db") + tdSql.execute("use db") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 50) + tdSql.checkData(0, 0, 50) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 120) + tdSql.checkData(0, 0, 120) tdSql.query("select count(*) from stb2") - tdSql.checkData(0, 0, 140) + tdSql.checkData(0, 0, 140) tdSql.query("select count(*) from stb3") tdSql.checkData(0, 0, 160) tdSql.query("select count(*) from stb4") @@ -155,15 +171,19 @@ class TDTestCase: # insert: let parament in json file is illegal, it'll expect error. 
tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge1024.json -y " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json -y " % binPath) tdSql.error("use db") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum1024.json -y " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insertSigcolumnsNum4096.json -y " % binPath) tdSql.error("select * from db.stb0") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum1024.json -y " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsAndTagNum4096.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(0) tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertColumnsNum0.json -y " % binPath) tdSql.execute("use db") @@ -173,13 +193,15 @@ class TDTestCase: os.system("%staosdemo -f tools/taosdemoAllTest/insertTagsNumLarge128.json -y " % binPath) tdSql.error("use db1") tdSql.execute("drop database if exists db") - os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar16384.json -y " % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json -y " % binPath) tdSql.query("select count(*) from db.stb0") tdSql.checkRows(1) tdSql.query("select count(*) from db.stb1") tdSql.checkRows(1) - tdSql.error("select * from db.stb3") + tdSql.error("select * from db.stb4") tdSql.error("select * from db.stb2") + tdSql.query("select count(*) from db.stb3") + tdSql.checkRows(1) tdSql.execute("drop database if exists db") os.system("%staosdemo -f tools/taosdemoAllTest/insertNumOfrecordPerReq0.json -y " % binPath) tdSql.error("select count(*) from db.stb0") @@ -201,10 +223,16 @@ class TDTestCase: tdSql.checkData(0, 0, "2019-10-01 00:00:00") tdSql.query("select last(ts) from blf.p_0_topics_6 ") tdSql.checkData(0, 0, "2020-09-29 23:59:00") + os.system("%staosdemo -f tools/taosdemoAllTest/insertMaxNumPerReq.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 5000000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 5000000) - # insert: timestamp and step + # insert: timestamp and step os.system("%staosdemo -f tools/taosdemoAllTest/insert-timestep.json -y " % binPath) tdSql.execute("use db") tdSql.query("show stables") @@ -213,13 +241,13 @@ class TDTestCase: tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 20) tdSql.query("select last(ts) from db.stb00_0") - tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 200) + tdSql.checkData(0, 0, 200) tdSql.query("select last(ts) from db.stb01_0") - tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 400) + tdSql.checkData(0, 0, 400) # # insert: disorder_ratio os.system("%staosdemo -f tools/taosdemoAllTest/insert-disorder.json -g 2>&1 -y " % binPath) @@ -229,14 +257,14 @@ class TDTestCase: 
tdSql.query("select count (tbname) from stb1") tdSql.checkData(0, 0, 1) tdSql.query("select count(*) from stb0") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 10) tdSql.query("select count(*) from stb1") - tdSql.checkData(0, 0, 10) + tdSql.checkData(0, 0, 10) # insert: sample json os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath) tdSql.execute("use dbtest123") - tdSql.query("select col2 from stb0") + tdSql.query("select c2 from stb0") tdSql.checkData(0, 0, 2147483647) tdSql.query("select * from stb1 where t1=-127") tdSql.checkRows(20) @@ -245,13 +273,13 @@ class TDTestCase: tdSql.query("select * from stb1 where t2=126") tdSql.checkRows(10) - # insert: test interlace parament + # insert: test interlace parament os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath) tdSql.execute("use db") tdSql.query("select count (tbname) from stb0") tdSql.checkData(0, 0, 100) tdSql.query("select count (*) from stb0") - tdSql.checkData(0, 0, 15000) + tdSql.checkData(0, 0, 15000) # # insert: auto_create @@ -290,8 +318,9 @@ class TDTestCase: tdSql.query('show tables like \'YYY%\'') #child_table_exists = yes, auto_create_table varies = yes tdSql.checkRows(20) + testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf tools/taosdemoAllTest/taosdemoTestInsertWithJson.py.sql") + os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py new file mode 100644 index 0000000000000000000000000000000000000000..cce6c83a078914bde638584bd50d9f22119eef34 --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py @@ -0,0 +1,316 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1000) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + tdSql.query("select count(*) from stb01_1") + tdSql.checkData(0, 0, 200) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 200000) + + + # insert: create mutiple tables per sql and insert one rows per sql . + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 10000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 100000) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 20000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 400000) + + # insert: using parament "insert_interval to controls spped of insert. + # but We need to have accurate methods to control the speed, such as getting the speed value, checking the count and so on。 + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkData(0, 4, 100) + tdSql.query("select count(*) from stb00_0") + tdSql.checkData(0, 0, 20000) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 2000000) + tdSql.query("show stables") + tdSql.checkData(1, 4, 100) + tdSql.query("select count(*) from stb01_0") + tdSql.checkData(0, 0, 20000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 2000000) + + # spend 2min30s for 3 testcases. 
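# A minimal sketch of a loop-based variant of the spot checks above: rather than
# sampling one child table per super table, walk a whole prefix and verify that every
# child table received the same number of rows. The prefix and the counts in the
# example call are assumptions for illustration, not values mandated by the stmt JSON
# configs used in this file; tdSql is the shared helper imported from util.sql.
def check_child_tables(prefix, table_count, rows_per_table):
    for i in range(table_count):
        tdSql.query("select count(*) from %s%d" % (prefix, i))   # per-child-table row count
        tdSql.checkData(0, 0, rows_per_table)

# e.g. check_child_tables("stb00_", 10, 10000)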
+ # insert: drop and child_table_exists combination test + # insert: using parament "childtable_offset and childtable_limit" to control table'offset point and offset + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json -y" % binPath) + tdSql.error("show dbno.stables") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-newdb-stmt.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 5) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 6) + tdSql.query("select count (tbname) from stb2") + tdSql.checkData(0, 0, 7) + tdSql.query("select count (tbname) from stb3") + tdSql.checkData(0, 0, 8) + tdSql.query("select count (tbname) from stb4") + tdSql.checkData(0, 0, 8) + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-offset-stmt.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 50) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 240) + tdSql.query("select count(*) from stb2") + tdSql.checkData(0, 0, 220) + tdSql.query("select count(*) from stb3") + tdSql.checkData(0, 0, 180) + tdSql.query("select count(*) from stb4") + tdSql.checkData(0, 0, 160) + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-newtable-stmt.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 150) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 360) + tdSql.query("select count(*) from stb2") + tdSql.checkData(0, 0, 360) + tdSql.query("select count(*) from stb3") + tdSql.checkData(0, 0, 340) + tdSql.query("select count(*) from stb4") + tdSql.checkData(0, 0, 400) + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json -y" % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 50) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 120) + tdSql.query("select count(*) from stb2") + tdSql.checkData(0, 0, 140) + tdSql.query("select count(*) from stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from stb4") + tdSql.checkData(0, 0, 160) + + + # insert: let parament in json file is illegal, it'll expect error. 
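# For reference, a minimal sketch of what an intentionally out-of-range column spec
# could look like, reusing the "columns"/"count" layout that taosdemoPerformance.py
# builds later in this patch. The 4097 figure simply exceeds the 4096 boundary that
# the file names below (...Large4096..., ...Num4096...) are probing; treat the exact
# limit and the surrounding keys as assumptions for illustration only.
over_limit_stable = {
    "name": "stb0",
    "childtable_count": 1,
    "childtable_prefix": "stb00_",
    "columns": [{"type": "INT", "count": 4097}],   # one column more than the suite's 4096 boundary
    "tags": [{"type": "INT", "count": 1}],
}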
+ tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNumLarge4096-stmt.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json -y " % binPath) + tdSql.error("select * from db.stb0") + # tdSql.execute("drop database if exists db") + # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json -y " % binPath) + # tdSql.query("select count(*) from db.stb0") + # tdSql.checkData(0, 0, 10000) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(0) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("show stables like 'stb0%' ") + tdSql.checkData(0, 2, 11) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json -y " % binPath) + tdSql.error("use db1") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json -y " % binPath) + tdSql.query("select count(*) from db.stb0") + tdSql.checkRows(1) + tdSql.query("select count(*) from db.stb1") + tdSql.checkRows(1) + tdSql.error("select * from db.stb4") + tdSql.error("select * from db.stb2") + tdSql.query("select count(*) from db.stb3") + tdSql.checkRows(1) + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json -y " % binPath) + tdSql.error("select count(*) from db.stb0") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists db") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json -y " % binPath) + tdSql.error("use db") + tdSql.execute("drop database if exists blf") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertTimestepMulRowsLargeint16-stmt.json -y " % binPath) + tdSql.execute("use blf") + tdSql.query("select ts from blf.p_0_topics_7 limit 262800,1") + tdSql.checkData(0, 0, "2020-03-31 12:00:00.000") + tdSql.query("select first(ts) from blf.p_0_topics_2") + tdSql.checkData(0, 0, "2019-10-01 00:00:00") + tdSql.query("select last(ts) from blf.p_0_topics_6 ") + tdSql.checkData(0, 0, "2020-09-29 23:59:00") + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 5000000) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 5000000) + + + # insert: timestamp and step + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-timestep-stmt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 20) + tdSql.query("select 
last(ts) from db.stb00_0") + tdSql.checkData(0, 0, "2020-10-01 00:00:00.019000") + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 200) + tdSql.query("select last(ts) from db.stb01_0") + tdSql.checkData(0, 0, "2020-11-01 00:00:00.190000") + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 400) + + # # insert: disorder_ratio + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-disorder-stmt.json 2>&1 -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 1) + tdSql.query("select count (tbname) from stb1") + tdSql.checkData(0, 0, 1) + tdSql.query("select count(*) from stb0") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from stb1") + tdSql.checkData(0, 0, 10) + + # # insert: sample json + # os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-stmt.json -y " % binPath) + # tdSql.execute("use dbtest123") + # tdSql.query("select c2 from stb0") + # tdSql.checkData(0, 0, 2147483647) + # tdSql.query("select * from stb1 where t1=-127") + # tdSql.checkRows(20) + # tdSql.query("select * from stb1 where t2=127") + # tdSql.checkRows(10) + # tdSql.query("select * from stb1 where t2=126") + # tdSql.checkRows(10) + + # insert: test interlace parament + os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json -y " % binPath) + tdSql.execute("use db") + tdSql.query("select count (tbname) from stb0") + tdSql.checkData(0, 0, 100) + tdSql.query("select count (*) from stb0") + tdSql.checkData(0, 0, 15000) + + + # insert: auto_create + + tdSql.execute('drop database if exists db') + tdSql.execute('create database db') + tdSql.execute('use db') + os.system("%staosdemo -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json " % binPath) # drop = no, child_table_exists, auto_create_table varies + tdSql.execute('use db') + tdSql.query('show tables like \'NN123%\'') #child_table_exists = no, auto_create_table varies = 123 + tdSql.checkRows(20) + tdSql.query('show tables like \'NNN%\'') #child_table_exists = no, auto_create_table varies = no + tdSql.checkRows(20) + tdSql.query('show tables like \'NNY%\'') #child_table_exists = no, auto_create_table varies = yes + tdSql.checkRows(20) + tdSql.query('show tables like \'NYN%\'') #child_table_exists = yes, auto_create_table varies = no + tdSql.checkRows(0) + tdSql.query('show tables like \'NY123%\'') #child_table_exists = yes, auto_create_table varies = 123 + tdSql.checkRows(0) + tdSql.query('show tables like \'NYY%\'') #child_table_exists = yes, auto_create_table varies = yes + tdSql.checkRows(0) + + tdSql.execute('drop database if exists db') + os.system("%staosdemo -y -f tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json " % binPath) # drop = yes, child_table_exists, auto_create_table varies + tdSql.execute('use db') + tdSql.query('show tables like \'YN123%\'') #child_table_exists = no, auto_create_table varies = 123 + tdSql.checkRows(20) + tdSql.query('show tables like \'YNN%\'') #child_table_exists = no, auto_create_table varies = no + tdSql.checkRows(20) + tdSql.query('show tables like \'YNY%\'') #child_table_exists = no, auto_create_table varies = yes + tdSql.checkRows(20) + tdSql.query('show tables like \'YYN%\'') #child_table_exists = yes, auto_create_table varies = no + tdSql.checkRows(20) + tdSql.query('show tables like \'YY123%\'') #child_table_exists = yes, auto_create_table varies = 123 + tdSql.checkRows(20) + tdSql.query('show tables like \'YYY%\'') #child_table_exists = yes, 
auto_create_table varies = yes + tdSql.checkRows(20) + + testcaseFilename = os.path.split(__file__)[-1] + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/%s.sql" % testcaseFilename ) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py index 643cad942c6586486640ba125d520b46c93e3465..6021c9136ad235f3e9d07bb4f6654fdac54989e5 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py @@ -19,6 +19,9 @@ from util.sql import * from util.dnodes import * import time from datetime import datetime +import ast +# from assertpy import assert_that +import subprocess class TDTestCase: def init(self, conn, logSql): @@ -40,85 +43,145 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + + # 获取taosc接口查询的结果文件中的内容,返回每行数据,并断言数据的第一列内容。 + def assertfileDataTaosc(self,filename,expectResult): + self.filename = filename + self.expectResult = expectResult + with open("%s" % filename, 'r+') as f1: + for line in f1.readlines(): + queryResult = line.strip().split()[0] + self.assertCheck(filename,queryResult,expectResult) + + # 获取restful接口查询的结果文件中的关键内容,目前的关键内容找到第一个key就跳出循,所以就只有一个数据。后续再修改多个结果文件。 + def getfileDataRestful(self,filename): + self.filename = filename + with open("%s" % filename, 'r+') as f1: + for line in f1.readlines(): + contents = line.strip() + if contents.find("data") != -1: + contentsDict = ast.literal_eval(contents) # 字符串转换为字典 + queryResult = contentsDict['data'][0][0] + break + return queryResult + + # 获取taosc接口查询次数 + def queryTimesTaosc(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # 获取restful接口查询次数 + def queryTimesRestful(self,filename): + self.filename = filename + command = 'cat %s |grep "200 OK" |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + # 定义断言结果是否正确。不正确返回错误结果,正确即通过。 + def assertCheck(self,filename,queryResult,expectResult): + self.filename = filename + self.queryResult = queryResult + self.expectResult = expectResult + args0 = (filename, queryResult, expectResult) + assert queryResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - binPath = buildPath+ "/build/bin/" + binPath = buildPath+ "/build/bin/" + + # delete useless files + os.system("rm -rf ./query_res*") + os.system("rm -rf ./all_query*") + + # taosc query: query specified table and query super table + os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/queryTaosc.json" % binPath) + os.system("cat query_res0.txt* > all_query_res0_taosc.txt") + os.system("cat query_res1.txt* > all_query_res1_taosc.txt") + os.system("cat query_res2.txt* > all_query_res2_taosc.txt") + + # correct Times testcases + queryTimes0Taosc = self.queryTimesTaosc("all_query_res0_taosc.txt") + self.assertCheck("all_query_res0_taosc.txt",queryTimes0Taosc,6) + + queryTimes1Taosc = self.queryTimesTaosc("all_query_res1_taosc.txt") + 
self.assertCheck("all_query_res1_taosc.txt",queryTimes1Taosc,6) + + queryTimes2Taosc = self.queryTimesTaosc("all_query_res2_taosc.txt") + self.assertCheck("all_query_res2_taosc.txt",queryTimes2Taosc,20) + + # correct data testcase + self.assertfileDataTaosc("all_query_res0_taosc.txt","1604160000099") + self.assertfileDataTaosc("all_query_res1_taosc.txt","100") + self.assertfileDataTaosc("all_query_res2_taosc.txt","1604160000199") - # query: query specified table and query super table - os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath) - os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryTaosc.json" % binPath) - os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") - os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") - os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") - tdSql.execute("use db") - tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")') - os.system("python3 tools/taosdemoAllTest/convertResFile.py") - tdSql.execute("insert into result0 file './test_query_res0.txt'") - tdSql.query("select ts from result0") - tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000") - tdSql.query("select count(*) from result0") - tdSql.checkData(0, 0, 1) - with open('./all_query_res1.txt','r+') as f1: - result1 = int(f1.readline()) - tdSql.query("select count(*) from stb00_1") - tdSql.checkData(0, 0, "%d" % result1) - - with open('./all_query_res2.txt','r+') as f2: - result2 = int(f2.readline()) - d2 = datetime.fromtimestamp(result2/1000) - timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f") - tdSql.query("select last_row(ts) from stb1") - tdSql.checkData(0, 0, "%s" % timest) + # delete useless files + os.system("rm -rf ./query_res*") + os.system("rm -rf ./all_query*") + + + # use restful api to query + os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertrestdata.json" % binPath) + os.system("%staosdemo -f tools/taosdemoAllTest/queryRestful.json" % binPath) + os.system("cat query_res0.txt* > all_query_res0_rest.txt") + os.system("cat query_res1.txt* > all_query_res1_rest.txt") + os.system("cat query_res2.txt* > all_query_res2_rest.txt") - # # delete useless files - # os.system("rm -rf ./insert_res.txt") - # os.system("rm -rf tools/taosdemoAllTest/*.py.sql") - # os.system("rm -rf ./querySystemInfo*") - # os.system("rm -rf ./query_res*") - # os.system("rm -rf ./all_query*") - # os.system("rm -rf ./test_query_res0.txt") - - - # # use restful api to query - # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryInsertdata.json" % binPath) - # os.system("%staosdemo -f tools/taosdemoAllTest/speciQueryRestful.json" % binPath) - # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") - # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") - # # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") - # tdSql.execute("use db") - # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")') - # os.system("python3 tools/taosdemoAllTest/convertResFile.py") - # tdSql.execute("insert into result0 file './test_query_res0.txt'") - # tdSql.query("select ts from result0") - # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000") - # tdSql.query("select count(*) from result0") - # tdSql.checkData(0, 0, 1) - # with open('./all_query_res1.txt','r+') as f1: - # result1 = int(f1.readline()) - # tdSql.query("select count(*) from stb00_1") - # tdSql.checkData(0, 0, "%d" % result1) - - # with 
open('./all_query_res2.txt','r+') as f2: - # result2 = int(f2.readline()) - # d2 = datetime.fromtimestamp(result2/1000) - # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f") - # tdSql.query("select last_row(ts) from stb1") - # tdSql.checkData(0, 0, "%s" % timest) + # correct Times testcases + queryTimes0Restful = self.queryTimesRestful("all_query_res0_rest.txt") + self.assertCheck("all_query_res0_rest.txt",queryTimes0Restful,6) + + queryTimes1Restful = self.queryTimesRestful("all_query_res1_rest.txt") + self.assertCheck("all_query_res1_rest.txt",queryTimes1Restful,6) + queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt") + self.assertCheck("all_query_res2_rest.txt",queryTimes2Restful,4) + + # correct data testcase + data0 = self.getfileDataRestful("all_query_res0_rest.txt") + self.assertCheck('all_query_res0_rest.txt',data0,"2020-11-01 00:00:00.009") + + data1 = self.getfileDataRestful("all_query_res1_rest.txt") + self.assertCheck('all_query_res1_rest.txt',data1,10) + data2 = self.getfileDataRestful("all_query_res2_rest.txt") + self.assertCheck('all_query_res2_rest.txt',data2,"2020-11-01 00:00:00.004") + # query times less than or equal to 100 + os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) os.system("%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" % binPath) os.system("%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" % binPath) - # query result print QPS + #query result print QPS + os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) os.system("%staosdemo -f tools/taosdemoAllTest/queryQps.json" % binPath) + + # use illegal or out of range parameters query json file + os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath) + exceptcode = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimes0.json" % binPath) + assert exceptcode != 0 + + exceptcode0 = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimesless0.json" % binPath) + assert exceptcode0 != 0 + + exceptcode1 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrentless0.json" % binPath) + assert exceptcode1 != 0 + exceptcode2 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrent0.json" % binPath) + assert exceptcode2 != 0 + + exceptcode3 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreadsless0.json" % binPath) + assert exceptcode3 != 0 + + exceptcode4 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreads0.json" % binPath) + assert exceptcode4 != 0 # delete useless files os.system("rm -rf ./insert_res.txt") @@ -127,6 +190,8 @@ class TDTestCase: os.system("rm -rf ./query_res*") os.system("rm -rf ./all_query*") os.system("rm -rf ./test_query_res0.txt") + + def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py index 1275b6a8b5d9345147ad36351d4269f0968fff5d..3e967581a4491da4108b981ccd83949751406b82 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJson.py @@ -19,6 +19,8 @@ from util.sql import * from util.dnodes import * import time from datetime import datetime +import subprocess + class TDTestCase: def init(self, conn, logSql): @@ -40,7 +42,22 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + + # get the number of subscriptions + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + 
times = int(subprocess.getstatusoutput(command)[1]) + return times + + # assert results + def assertCheck(self,filename,subResult,expectResult): + self.filename = filename + self.subResult = subResult + self.expectResult = expectResult + args0 = (filename, subResult, expectResult) + assert subResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + def run(self): buildPath = self.getBuildPath() if (buildPath == ""): @@ -48,48 +65,136 @@ class TDTestCase: else: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/subSync.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + os.system("ps -ef |grep 'taosdemoAllTest/subSyncKeepStart.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + sleep(1) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + sleep(2) + # subscribe: sync + os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/subSync.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subSync.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + + # insert extral data + tdSql.execute("use db") + tdSql.execute("insert into stb00_0 values(1614218412000,'R','bf3',8637,98.861045)") + tdSql.execute("insert into stb00_1 values(1614218412000,'R','bf3',8637,78.861045)(1614218422000,'R','bf3',8637,98.861045)") + sleep(5) + + # merge result files + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt") + - # query: query specified table and query super table - # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath) - # os.system("%staosdemo -f tools/taosdemoAllTest/sub.json" % binPath) - # os.system("cat query_res0.txt* |sort -u > all_query_res0.txt") - # os.system("cat query_res1.txt* |sort -u > all_query_res1.txt") - # os.system("cat query_res2.txt* |sort -u > all_query_res2.txt") - # tdSql.execute("use db") - # tdSql.execute('create table result0 using stb0 tags(121,43,"beijing","beijing","beijing","beijing","beijing")') - # os.system("python3 tools/taosdemoAllTest/convertResFile.py") - # tdSql.execute("insert into result0 file './test_query_res0.txt'") - # tdSql.query("select ts from result0") - # tdSql.checkData(0, 0, "2020-11-01 00:00:00.099000") - # tdSql.query("select count(*) from result0") - # tdSql.checkData(0, 0, 1) - # with open('./all_query_res1.txt','r+') as f1: - # result1 = int(f1.readline()) - # tdSql.query("select count(*) from stb00_1") - # tdSql.checkData(0, 0, "%d" % result1) - - # with open('./all_query_res2.txt','r+') as f2: - # result2 = int(f2.readline()) - # d2 = datetime.fromtimestamp(result2/1000) - # timest = d2.strftime("%Y-%m-%d %H:%M:%S.%f") - # tdSql.query("select last_row(ts) from stb1") - # tdSql.checkData(0, 0, "%s" % timest) + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,22) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,24) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,21) + subTimes3 = self.subTimes("all_subscribe_res3.txt") + 
self.assertCheck("all_subscribe_res3.txt",subTimes3 ,13) - # # query times less than or equal to 100 - # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySpeciMutisql100.json" % binPath) - # os.system("%staosdemo -f tools/taosdemoAllTest/QuerySuperMutisql100.json" % binPath) + + # correct data testcase + os.system("kill -9 %d" % query_pid) + sleep(3) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") + + # # sql number lager 100 + os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0 + assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0 + + # # result files is null + # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + # os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) + # # assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncResFileNull.json" % binPath) != 0 + + + + + # resubAfterConsume= -1 endAfter=-1 ; + os.system('kill -9 `ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'` ') + os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1.json & " % binPath) + sleep(2) + query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + print("get sub1 process'pid") + subres0Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1]) + subres2Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1]) + assert 0==subres0Number1 , "subres0Number1 error" + assert 0==subres2Number1 , "subres2Number1 error" + tdSql.execute("insert into db.stb00_0 values(1614218412000,'R','bf3',8637,78.861045)(1614218413000,'R','bf3',8637,98.861045)") + sleep(4) + subres2Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1]) + subres0Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1]) + assert 0!=subres2Number2 , "subres2Number2 error" + assert 0!=subres0Number2 , "subres0Number2 error" + os.system("kill -9 %d" % query_pid1) + os.system("rm -rf ./subscribe_res*") + + # # resubAfterConsume= -1 endAfter=0 ; + # os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % binPath) + # os.system('kill -9 `ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'` ') + # os.system("nohup %staosdemo -f tools/taosdemoAllTest/Resubjson/subSyncResubACMinus1endAfter0.json & " % binPath) + # sleep(2) + # query_pid1 = int(subprocess.getstatusoutput('ps aux|grep "subSyncResubACMinus1endAfter0.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + # print("get sub2 process'pid") + # subres0Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1]) + # subres2Number1 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res2* |wc -l' )[1]) + # assert 0==subres0Number1 , "subres0Number1 error" + # assert 0==subres2Number1 , "subres2Number1 error" + # tdSql.execute("insert into db.stb00_0 values(1614218412000,'R','bf3',8637,78.861045)(1614218413000,'R','bf3',8637,98.861045)") + # sleep(4) + # subres2Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1]) + # subres0Number2 =int(subprocess.getstatusoutput('grep "1614218412000" subscribe_res0* |wc -l' )[1]) + # assert 
0!=subres2Number2 , "subres2Number2 error" + # assert 0!=subres0Number2 , "subres0Number2 error" + # os.system("kill -9 %d" % query_pid1) + # os.system("rm -rf ./subscribe_res*") + # # # merge result files + # os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + # os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + # os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + # # os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt") + + # sleep(3) + + # # correct subscribeTimes testcase + # subTimes0 = self.subTimes("all_subscribe_res0.txt") + # self.assertCheck("all_subscribe_res0.txt",subTimes0 ,3960) + + # subTimes1 = self.subTimes("all_subscribe_res1.txt") + # self.assertCheck("all_subscribe_res1.txt",subTimes1 ,40) + + # subTimes2 = self.subTimes("all_subscribe_res2.txt") + # self.assertCheck("all_subscribe_res2.txt",subTimes2 ,1900) + + + # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath) + # os.system("%staosdemo -f tools/taosdemoAllTest/subSupermaxsql100.json" % binPath) + + + # delete useless files - # os.system("rm -rf ./insert_res.txt") - # os.system("rm -rf tools/taosdemoAllTest/*.py.sql") - # os.system("rm -rf ./querySystemInfo*") - # os.system("rm -rf ./query_res*") - # os.system("rm -rf ./all_query*") - # os.system("rm -rf ./test_query_res0.txt") + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") def stop(self): tdSql.close() diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py new file mode 100644 index 0000000000000000000000000000000000000000..f2aa01e8703d9703d647507736130de2dd582bfb --- /dev/null +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSubWithJsonAsync.py @@ -0,0 +1,124 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import time +from datetime import datetime +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # 获取订阅次数 + def subTimes(self,filename): + self.filename = filename + command = 'cat %s |wc -l'% filename + times = int(subprocess.getstatusoutput(command)[1]) + return times + + def assertCheck(self,filename,queryResult,expectResult): + self.filename = filename + self.queryResult = queryResult + self.expectResult = expectResult + args0 = (filename, queryResult, expectResult) + assert queryResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0 + + def run(self): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath+ "/build/bin/" + + # clear env + os.system("ps -ef |grep 'taosdemoAllTest/subAsync.json' |grep -v 'grep' |awk '{print $2}'|xargs kill -9") + sleep(1) + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe_res*") + + # subscribe: resultfile + os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdata.json" % binPath) + os.system("nohup %staosdemo -f tools/taosdemoAllTest/subAsync.json &" % binPath) + query_pid = int(subprocess.getstatusoutput('ps aux|grep "taosdemoAllTest/subAsync.json" |grep -v "grep"|awk \'{print $2}\'')[1]) + + # insert extral data + tdSql.execute("use db") + tdSql.execute("insert into stb00_0 values(1614218412000,'R','bf3',8637,98.861045)") + tdSql.execute("insert into stb00_1 values(1614218412000,'R','bf3',8637,78.861045)(1614218422000,'R','bf3',8637,98.861045)") + sleep(5) + + # merge result files + os.system("cat subscribe_res0.txt* > all_subscribe_res0.txt") + os.system("cat subscribe_res1.txt* > all_subscribe_res1.txt") + os.system("cat subscribe_res2.txt* > all_subscribe_res2.txt") + os.system("cat subscribe_res3.txt* > all_subscribe_res3.txt") + + # correct subscribeTimes testcase + subTimes0 = self.subTimes("all_subscribe_res0.txt") + self.assertCheck("all_subscribe_res0.txt",subTimes0 ,22) + + subTimes1 = self.subTimes("all_subscribe_res1.txt") + self.assertCheck("all_subscribe_res1.txt",subTimes1 ,24) + + subTimes2 = self.subTimes("all_subscribe_res2.txt") + self.assertCheck("all_subscribe_res2.txt",subTimes2 ,21) + + subTimes3 = self.subTimes("all_subscribe_res3.txt") + self.assertCheck("all_subscribe_res3.txt",subTimes3 ,13) + + # correct data testcase + + os.system("kill -9 %d" % query_pid) + + # # query times less than or equal to 100 + os.system("%staosdemo -f tools/taosdemoAllTest/subInsertdataMaxsql100.json" % 
binPath) + assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSpecMaxsql100.json" % binPath) != 0 + assert os.system("%staosdemo -f tools/taosdemoAllTest/subSyncSuperMaxsql100.json" % binPath) != 0 + + # delete useless files + os.system("rm -rf ./insert_res.txt") + os.system("rm -rf tools/taosdemoAllTest/*.py.sql") + os.system("rm -rf ./subscribe_res*") + os.system("rm -rf ./all_subscribe*") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index a45393e22284d675584c5dddd71fc507bcb2563f..c8293ee31f713e34a7d974c23f8bec0b375847f1 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -16,11 +16,13 @@ import pandas as pd import argparse import os.path import json +from util.log import tdLog +from util.sql import tdSql class taosdemoPerformace: def __init__(self, commitID, dbName): self.commitID = commitID - self.dbName = dbName + self.dbName = dbName self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -30,8 +32,8 @@ class taosdemoPerformace: self.user, self.password, self.config) - self.insertDB = "insertDB"; - + self.insertDB = "insertDB" + def generateJson(self): db = { "name": "%s" % self.insertDB, @@ -41,7 +43,7 @@ class taosdemoPerformace: stb = { "name": "meters", - "child_table_exists":"no", + "child_table_exists": "no", "childtable_count": 10000, "childtable_prefix": "stb_", "auto_create_table": "no", @@ -57,12 +59,12 @@ class taosdemoPerformace: "start_timestamp": "2020-10-01 00:00:00.000", "sample_format": "csv", "sample_file": "./sample.csv", - "tags_file": "", + "tags_file": "", "columns": [ {"type": "INT", "count": 4} - ], + ], "tags": [ - {"type": "INT", "count":1}, + {"type": "INT", "count": 1}, {"type": "BINARY", "len": 16} ] } @@ -88,7 +90,7 @@ class taosdemoPerformace: "confirm_parameter_prompt": "no", "insert_interval": 0, "num_of_records_per_req": 30000, - "databases": [db] + "databases": [db] } insert_json_file = f"/tmp/insert.json" @@ -103,24 +105,56 @@ class taosdemoPerformace: cmd.close() return output - def insertData(self): - os.system("taosdemo -f %s > taosdemoperf.txt 2>&1" % self.generateJson()) - self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") - self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") - self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdemo" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def insertData(self): + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdemo not found!") + else: + tdLog.info("taosdemo found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + os.system( + "%staosdemo -f %s > taosdemoperf.txt 2>&1" % + (binPath, self.generateJson())) + self.createTableTime = self.getCMDOutput( + "grep 'Spent' taosdemoperf.txt | awk 'NR==1{print 
$2}'") + self.insertRecordsTime = self.getCMDOutput( + "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") + self.recordsPerSecond = self.getCMDOutput( + "grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") self.commitID = self.getCMDOutput("git rev-parse --short HEAD") - delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $4}'") + delay = self.getCMDOutput( + "grep 'delay' taosdemoperf.txt | awk '{print $4}'") self.avgDelay = delay[:-4] - delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $6}'") + delay = self.getCMDOutput( + "grep 'delay' taosdemoperf.txt | awk '{print $6}'") self.maxDelay = delay[:-4] - delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $8}'") + delay = self.getCMDOutput( + "grep 'delay' taosdemoperf.txt | awk '{print $8}'") self.minDelay = delay[:-3] os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt") def createTablesAndStoreData(self): cursor = self.conn.cursor() - + cursor.execute("create database if not exists %s" % self.dbName) cursor.execute("use %s" % self.dbName) cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float)") @@ -130,13 +164,21 @@ class taosdemoPerformace: print("records per second: %f" % float(self.recordsPerSecond)) print("avg delay: %f" % float(self.avgDelay)) print("max delay: %f" % float(self.maxDelay)) - print("min delay: %f" % float(self.minDelay)) - cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" % - (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond), self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay))) + print("min delay: %f" % float(self.minDelay)) + cursor.execute( + "insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" % + (float( + self.createTableTime), float( + self.insertRecordsTime), float( + self.recordsPerSecond), self.commitID, float( + self.avgDelay), float( + self.maxDelay), float( + self.minDelay))) cursor.execute("drop database if exists %s" % self.insertDB) cursor.close() + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( @@ -155,6 +197,6 @@ if __name__ == '__main__': args = parser.parse_args() - perftest = taosdemoPerformace(args.commit_id, args.database_name) + perftest = taosdemoPerformace(args.commit_id, args.database_name) perftest.insertData() perftest.createTablesAndStoreData() diff --git a/tests/pytest/tools/taosdemoTest.py b/tests/pytest/tools/taosdemoTest.py index ff5921be604f9fe911f1aa8b84efe230baf20e07..5662881031a01d19398cce223892eebbd8133c97 100644 --- a/tests/pytest/tools/taosdemoTest.py +++ b/tests/pytest/tools/taosdemoTest.py @@ -36,7 +36,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosdemo" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] @@ -47,11 +47,11 @@ class TDTestCase: tdSql.prepare() buildPath = self.getBuildPath() if (buildPath == ""): - tdLog.exit("taosd not found!") + tdLog.exit("taosdemo not found!") else: - tdLog.info("taosd found in %s" % buildPath) + tdLog.info("taosdemo found in %s" % buildPath) binPath = buildPath + "/build/bin/" - os.system("%staosdemo -y -t %d -n %d" % + os.system("%staosdemo -y -t 
%d -n %d -b INT,INT,INT,INT" % (binPath, self.numberOfTables, self.numberOfRecords)) tdSql.execute("use test") @@ -59,11 +59,11 @@ class TDTestCase: tdSql.checkData(0, 0, self.numberOfTables * self.numberOfRecords) tdSql.query( - "select sum(col1) from test.meters interval(1h) sliding(30m)") + "select sum(c1) from test.meters interval(1h) sliding(30m)") tdSql.checkRows(2) tdSql.query( - "select apercentile(col1, 1) from test.meters interval(100s)") + "select apercentile(c1, 1) from test.meters interval(100s)") tdSql.checkRows(1) tdSql.error("select loc, count(loc) from test.meters") diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py index 9aa131624e83e04df12b59b0b0318562098c77cb..56c535916a51046e65b2ddd9813141ddb8848bd1 100644 --- a/tests/pytest/tools/taosdemoTestTblAlt.py +++ b/tests/pytest/tools/taosdemoTestTblAlt.py @@ -54,7 +54,7 @@ class TDTestCase: binPath = buildPath + "/build/bin/" if(threadID == 0): - os.system("%staosdemo -y -t %d -n %d" % + os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT -m t" % (binPath, self.numberOfTables, self.numberOfRecords)) if(threadID == 1): time.sleep(2) diff --git a/tests/pytest/tools/taosdemoTestWithoutMetric.py b/tests/pytest/tools/taosdemoTestWithoutMetric.py index 9687600563d8fed68c6f9c67643759a3dcfa9703..01e19355d9dfde5c536ac1e28e1f190f33ab966e 100644 --- a/tests/pytest/tools/taosdemoTestWithoutMetric.py +++ b/tests/pytest/tools/taosdemoTestWithoutMetric.py @@ -60,7 +60,7 @@ class TDTestCase: tdSql.execute("use test") tdSql.query( - "select count(*) from test.t%d" % (self.numberOfTables -1)) + "select count(*) from test.d%d" % (self.numberOfTables -1)) tdSql.checkData(0, 0, self.numberOfRecords) def stop(self): diff --git a/tests/pytest/tools/taosdumpTest.py b/tests/pytest/tools/taosdumpTest.py index 534a477b340210b8ee6bcd77fe6010c6d3d261e0..0dfc42f331b1a1c59d71268985d6a72d4d652856 100644 --- a/tests/pytest/tools/taosdumpTest.py +++ b/tests/pytest/tools/taosdumpTest.py @@ -23,42 +23,109 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + self.ts = 1538548685000 self.numberOfTables = 10000 self.numberOfRecords = 100 - + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + def run(self): - tdSql.prepare() + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/tmp1") + else: + print("目录存在") - tdSql.execute("create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") - tdSql.execute("create table t1 using st tags(1, 'beijing')") + if not os.path.exists("./taosdumptest/tmp2"): + os.makedirs("./taosdumptest/tmp2") + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + tdSql.execute("create database db1 days 12 keep 3640 blocks 7 ") + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 
binary(10))") + tdSql.execute("create table t1 using st tags(1, 'beijing')") sql = "insert into t1 values" currts = self.ts for i in range(100): sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100) tdSql.execute(sql) - - tdSql.execute("create table t2 using st tags(2, 'shanghai')") + tdSql.execute("create table t2 using st tags(2, 'shanghai')") sql = "insert into t2 values" currts = self.ts for i in range(100): sql += "(%d, %d, 'nchar%d')" % (currts + i, i % 100, i % 100) tdSql.execute(sql) - os.system("taosdump --databases db -o /tmp") - + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + os.system("%staosdump --databases db -o ./taosdumptest/tmp1" % binPath) + os.system( + "%staosdump --databases db1 -o ./taosdumptest/tmp2" % + binPath) + tdSql.execute("drop database db") + tdSql.execute("drop database db1") tdSql.query("show databases") tdSql.checkRows(0) - - os.system("taosdump -i /tmp") - tdSql.query("show databases") - tdSql.checkRows(1) - tdSql.checkData(0, 0, 'db') - + os.system("%staosdump -i ./taosdumptest/tmp1" % binPath) + os.system("%staosdump -i ./taosdumptest/tmp2" % binPath) + tdSql.execute("use db") + tdSql.query("show databases") + tdSql.checkRows(2) + dbresult = tdSql.queryResult + # 6--days,7--keep0,keep1,keep, 12--block, + + isCommunity = self.checkCommunity() + print("iscommunity: %d" % isCommunity) + for i in range(len(dbresult)): + if dbresult[i][0] == 'db': + print(dbresult[i]) + print(type(dbresult[i][6])) + print(type(dbresult[i][7])) + print(type(dbresult[i][9])) + assert dbresult[i][6] == 11 + if isCommunity: + assert dbresult[i][7] == "3649" + else: + assert dbresult[i][7] == "3649,3649,3649" + assert dbresult[i][9] == 8 + if dbresult[i][0] == 'db1': + assert dbresult[i][6] == 12 + if isCommunity: + assert dbresult[i][7] == "3640" + else: + assert dbresult[i][7] == "3640,3640,3640" + assert dbresult[i][9] == 7 + tdSql.query("show stables") tdSql.checkRows(1) tdSql.checkData(0, 0, 'st') @@ -80,10 +147,42 @@ class TDTestCase: tdSql.checkData(i, 1, i) tdSql.checkData(i, 2, "nchar%d" % i) + # drop all databases,boundary value testing. 
+ # length(databasename)<=32;length(tablesname)<=192 + tdSql.execute("drop database db") + tdSql.execute("drop database db1") + os.system("rm -rf ./taosdumptest/tmp1") + os.system("rm -rf ./taosdumptest/tmp2") + os.makedirs("./taosdumptest/tmp1") + tdSql.execute("create database db12312313231231321312312312_323") + tdSql.error("create database db12312313231231321312312312_3231") + tdSql.execute("use db12312313231231321312312312_323") + tdSql.execute("create stable st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") + tdSql.error("create stable st_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") + tdSql.execute( + "create stable st(ts timestamp, c1 int, c2 nchar(10)) tags(t1 int, t2 binary(10))") + tdSql.error("create stable st1(ts timestamp, c1 int, col2_012345678901234567890123456789012345678901234567890123456789 nchar(10)) tags(t1 int, t2 binary(10))") + + tdSql.execute("select * from db12312313231231321312312312_323.st12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9") + tdSql.error("create table t0_12345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678912345678_9 using st tags(1, 'beijing')") + tdSql.query("show stables") + tdSql.checkRows(2) + os.system( + "%staosdump --databases db12312313231231321312312312_323 -o ./taosdumptest/tmp1" % binPath) + tdSql.execute("drop database db12312313231231321312312312_323") + os.system("%staosdump -i ./taosdumptest/tmp1" % binPath) + tdSql.execute("use db12312313231231321312312312_323") + tdSql.query("show stables") + tdSql.checkRows(2) + os.system("rm -rf ./taosdumptest/tmp1") + os.system("rm -rf ./taosdumptest/tmp2") + os.system("rm -rf ./dump_result.txt") + os.system("rm -rf ./db.csv") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py new file mode 100644 index 0000000000000000000000000000000000000000..bed0564139e20fb6c562a7258af0cbd5b542069b --- /dev/null +++ b/tests/pytest/tools/taosdumpTest2.py @@ -0,0 +1,99 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1601481600000 + self.numberOfTables = 1 + self.numberOfRecords = 15000 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("create table st(ts timestamp, c1 timestamp, c2 int, c3 bigint, c4 float, c5 double, c6 binary(8), c7 smallint, c8 tinyint, c9 bool, c10 nchar(8)) tags(t1 int)") + tdSql.execute("create table t1 using st tags(0)") + currts = self.ts + finish = 0 + while(finish < self.numberOfRecords): + sql = "insert into t1 values" + for i in range(finish, self.numberOfRecords): + sql += "(%d, 1019774612, 29931, 1442173978, 165092.468750, 1128.643179, 'MOCq1pTu', 18405, 82, 0, 'g0A6S0Fu')" % (currts + i) + finish = i + 1 + if (1048576 - len(sql)) < 16384: + break + tdSql.execute(sql) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + os.system("rm /tmp/*.sql") + os.system( + "%staosdump --databases db -o /tmp -B 32766 -L 1048576" % + binPath) + + tdSql.execute("drop database db") + tdSql.query("show databases") + tdSql.checkRows(0) + + os.system("%staosdump -i /tmp" % binPath) + + tdSql.query("show databases") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'db') + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("select count(*) from t1") + tdSql.checkData(0, 0, self.numberOfRecords) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdumpTestNanoSupport.py b/tests/pytest/tools/taosdumpTestNanoSupport.py new file mode 100644 index 0000000000000000000000000000000000000000..ca8832170b7706621f5ef9d3225fe2cf16141c34 --- /dev/null +++ b/tests/pytest/tools/taosdumpTestNanoSupport.py @@ -0,0 +1,362 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
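The insert loop in taosdumpTest2 above keeps each generated statement under the 1 MB SQL-length cap by checking the remaining headroom before continuing the batch. A condensed sketch of that batching logic, reusing the same 1048576-byte cap and 16384-byte margin as the test (the `batched_insert` helper and its `execute` callback are illustrative, not part of the suite):

```python
MAX_SQL_LEN = 1048576   # cap assumed by the test above
HEADROOM = 16384        # flush once less than this much room is left

def batched_insert(execute, table, values, start_ts):
    written = 0
    while written < len(values):
        sql = "insert into %s values" % table
        for i in range(written, len(values)):
            sql += "(%d, %d)" % (start_ts + i, values[i])
            written = i + 1
            if MAX_SQL_LEN - len(sql) < HEADROOM:
                break   # statement is close to the cap, send it and start a new one
        execute(sql)
```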
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1625068800000000000 # this is timestamp "2021-07-01 00:00:00" + self.numberOfTables = 10 + self.numberOfRecords = 100 + + def checkCommunity(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + return False + else: + return True + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + + + def createdb(self, precision="ns"): + tb_nums = self.numberOfTables + per_tb_rows = self.numberOfRecords + + def build_db(precision, start_time): + tdSql.execute("drop database if exists timedb1") + tdSql.execute( + "create database timedb1 days 10 keep 365 blocks 8 precision "+"\""+precision+"\"") + + tdSql.execute("use timedb1") + tdSql.execute( + "create stable st(ts timestamp, c1 int, c2 nchar(10),c3 timestamp) tags(t1 int, t2 binary(10))") + for tb in range(tb_nums): + tbname = "t"+str(tb) + tdSql.execute("create table " + tbname + + " using st tags(1, 'beijing')") + sql = "insert into " + tbname + " values" + currts = start_time + if precision == "ns": + ts_seed = 1000000000 + elif precision == "us": + ts_seed = 1000000 + else: + ts_seed = 1000 + + for i in range(per_tb_rows): + sql += "(%d, %d, 'nchar%d',%d)" % (currts + i*ts_seed, i % + 100, i % 100, currts + i*100) # currts +1000ms (1000000000ns) + tdSql.execute(sql) + + if precision == "ns": + start_time = 1625068800000000000 + build_db(precision, start_time) + + elif precision == "us": + start_time = 1625068800000000 + build_db(precision, start_time) + + elif precision == "ms": + start_time = 1625068800000 + build_db(precision, start_time) + + else: + print("other time precision not valid , please check! 
") + + + def run(self): + + # clear envs + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exist!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + # create nano second database + + self.createdb(precision="ns") + + # dump all data + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + # dump part data with -S -E + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -E 1625068860000000000 -C ns -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + # replace strings to dump in databases + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + # dump data and check for taosdump + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + # check data + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test nano second : dump check data pass for all data!" ) + else: + tdLog.info("test nano second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 and ts <= 1625068860000000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test nano second : dump check data pass for data! " ) + else: + tdLog.info(" test nano second : dump check data failed for data !" 
) + + + # us second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="us") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -E 1625068860000000 -C us -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test us second : dump check data pass for all data!" ) + else: + tdLog.info("test us second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 and ts <= 1625068860000000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test us second : dump check data pass for data! " ) + else: + tdLog.info(" test us second : dump check data failed for data! 
" ) + + + # ms second support test case + + os.system("rm -rf ./taosdumptest/") + tdSql.execute("drop database if exists dumptmp1") + tdSql.execute("drop database if exists dumptmp2") + tdSql.execute("drop database if exists dumptmp3") + + if not os.path.exists("./taosdumptest/tmp1"): + os.makedirs("./taosdumptest/dumptmp1") + else: + print("path exits!") + + if not os.path.exists("./taosdumptest/dumptmp2"): + os.makedirs("./taosdumptest/dumptmp2") + + if not os.path.exists("./taosdumptest/dumptmp3"): + os.makedirs("./taosdumptest/dumptmp3") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + self.createdb(precision="ms") + + os.system( + "%staosdump --databases timedb1 -o ./taosdumptest/dumptmp1" % binPath) + + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -E 1625068860000 -C ms -o ./taosdumptest/dumptmp2 ' % + binPath) + os.system( + '%staosdump --databases timedb1 -S 1625068810000 -o ./taosdumptest/dumptmp3 ' % + binPath) + + os.system( + "sed -i \"s/timedb1/dumptmp1/g\" `grep timedb1 -rl ./taosdumptest/dumptmp1`") + os.system( + "sed -i \"s/timedb1/dumptmp2/g\" `grep timedb1 -rl ./taosdumptest/dumptmp2`") + os.system( + "sed -i \"s/timedb1/dumptmp3/g\" `grep timedb1 -rl ./taosdumptest/dumptmp3`") + + os.system( "%staosdump -i ./taosdumptest/dumptmp1" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp2" %binPath) + os.system( "%staosdump -i ./taosdumptest/dumptmp3" %binPath) + + + tdSql.query("select count(*) from dumptmp1.st") + tdSql.checkData(0,0,1000) + + tdSql.query("select count(*) from dumptmp2.st") + tdSql.checkData(0,0,510) + + tdSql.query("select count(*) from dumptmp3.st") + tdSql.checkData(0,0,900) + + + origin_res = tdSql.getResult("select * from timedb1.st") + dump_res = tdSql.getResult("select * from dumptmp1.st") + if origin_res == dump_res: + tdLog.info("test ms second : dump check data pass for all data!" ) + else: + tdLog.info("test ms second : dump check data failed for all data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 and ts <= 1625068860000") + dump_res = tdSql.getResult("select * from dumptmp2.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data!" ) + + origin_res = tdSql.getResult("select * from timedb1.st where ts >=1625068810000 ") + dump_res = tdSql.getResult("select * from dumptmp3.st") + if origin_res == dump_res: + tdLog.info(" test ms second : dump check data pass for data! " ) + else: + tdLog.info(" test ms second : dump check data failed for data! 
" ) + + + os.system("rm -rf ./taosdumptest/") + os.system("rm -rf ./dump_result.txt") + os.system("rm -rf *.py.sql") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 0f71ffd0a37587cc6be895b4b8b168e6b8cfcaf8..ae4ba97eb3d6c9d9530d9f218bf05dd25aff3b02 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -127,6 +127,7 @@ class TDDnode: "anyIp":"0", "tsEnableTelemetryReporting":"0", "dDebugFlag":"135", + "tsdbDebugFlag":"135", "mDebugFlag":"135", "sdbDebugFlag":"135", "rpcDebugFlag":"135", diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 8f62c5932b71049e97375ef6c57ecb563d204844..4a0455220637d0b2b99430008a62727a6ee70e30 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -18,6 +18,7 @@ import datetime import inspect import psutil import shutil +import pandas as pd from util.log import * @@ -78,6 +79,21 @@ class TDSql: raise Exception(repr(e)) return self.queryRows + def getColNameList(self, sql): + self.sql = sql + try: + col_name_list = [] + self.cursor.execute(sql) + self.queryCols = self.cursor.description + for query_col in self.queryCols: + col_name_list.append(query_col[0]) + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return col_name_list + def waitedQuery(self, sql, expectRows, timeout): tdLog.info("sql: %s, try to retrieve %d rows in %d seconds" % (sql, expectRows, timeout)) self.sql = sql @@ -134,25 +150,32 @@ class TDSql: return self.cursor.istype(col, dataType) def checkData(self, row, col, data): - self.checkRowCol(row, col) - if self.queryResult[row][col] != data: - if self.cursor.istype(col, "TIMESTAMP") and self.queryResult[row][col] == datetime.datetime.fromisoformat(data): - tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + self.checkRowCol(row, col) + if self.queryResult[row][col] != data: + if self.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed + if (len(data) >= 28): + if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % + (self.sql, row, col, self.queryResult[row][col], data)) + else: + if self.queryResult[row][col] == datetime.datetime.fromisoformat(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) return if str(self.queryResult[row][col]) == str(data): tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + (self.sql, row, col, self.queryResult[row][col], data)) return - elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001: + elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001: tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % - (self.sql, row, col, self.queryResult[row][col], data)) + (self.sql, row, col, self.queryResult[row][col], data)) return else: caller = inspect.getframeinfo(inspect.stack()[1][0]) args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data) - tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + 
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) if data is None: tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % @@ -162,11 +185,11 @@ class TDSql: (self.sql, row, col, self.queryResult[row][col], data)) elif isinstance(data, datetime.date): tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % - (self.sql, row, col, self.queryResult[row][col], data)) + (self.sql, row, col, self.queryResult[row][col], data)) elif isinstance(data, float): tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % (self.sql, row, col, self.queryResult[row][col], data)) - else: + else: tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % (self.sql, row, col, self.queryResult[row][col], data)) @@ -174,6 +197,19 @@ class TDSql: self.checkRowCol(row, col) return self.queryResult[row][col] + def getResult(self, sql): + self.sql = sql + try: + self.cursor.execute(sql) + self.queryResult = self.cursor.fetchall() + except Exception as e: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + return self.queryResult + + def executeTimes(self, sql, times): for i in range(times): try: @@ -200,7 +236,15 @@ class TDSql: tdLog.exit("%s(%d) failed: sql:%s, affectedRows:%d != expect:%d" % args) tdLog.info("sql:%s, affectedRows:%d == expect:%d" % (self.sql, self.affectedRows, expectAffectedRows)) - + + def checkColNameList(self, col_name_list, expect_col_name_list): + if col_name_list == expect_col_name_list: + tdLog.info("sql:%s, col_name_list:%s == expect_col_name_list:%s" % (self.sql, col_name_list, expect_col_name_list)) + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list) + tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args) + def taosdStatus(self, state): tdLog.sleep(5) pstate = 0 @@ -221,7 +265,7 @@ class TDSql: continue pstate = 0 break - + args=(pstate,state) if pstate == state: tdLog.info("taosd state is %d == expect:%d" %args) @@ -236,11 +280,11 @@ class TDSql: tdLog.exit("dir: %s is empty, expect: not empty" %dir) else: tdLog.info("dir: %s is empty, expect: empty" %dir) - else: + else: if state : tdLog.info("dir: %s is not empty, expect: not empty" %dir) else: - tdLog.exit("dir: %s is not empty, expect: empty" %dir) + tdLog.exit("dir: %s is not empty, expect: empty" %dir) else: tdLog.exit("dir: %s doesn't exist" %dir) def createDir(self, dir): @@ -250,5 +294,5 @@ class TDSql: os.makedirs( dir, 755 ) tdLog.info("dir: %s is created" %dir) pass - -tdSql = TDSql() + +tdSql = TDSql() \ No newline at end of file diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py new file mode 100644 index 0000000000000000000000000000000000000000..d211f86b81d0b41237c645c7955a3d5a3099cf11 --- /dev/null +++ b/tests/pytest/util/taosdemoCfg.py @@ -0,0 +1,465 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import datetime +import inspect +import psutil +import shutil +import json +from util.log import * +from multiprocessing import cpu_count + + +# TODO: fully test the function. Handle exceptions. +# Handle json format not accepted by taosdemo + +### How to use TaosdemoCfg: +# Before you start: +# Make sure you understand how is taosdemo's JSON file structured. Because the python used does +# not support directory in directory for self objects, the config is being tear to different parts. +# Please make sure you understand which directory represent which part of which type of the file +# This module will reassemble the parts when creating the JSON file. +# +# Basic use example +# step 1:use self.append_sql_stb() to append the insert/query/subscribe directory into the module +# you can append many insert/query/subscribe directory, but pay attention about taosdemo's limit +# step 2:use alter function to alter the specific config +# step 3:use the generation function to generate the files +# +# step 1 and step 2 can be replaced with using import functions +class TDTaosdemoCfg: + def __init__(self): + self.insert_cfg = { + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": cpu_count(), + "thread_count_create_tbl": cpu_count(), + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 32766, + "max_sql_len": 32766, + "databases": None + } + + self.db = { + "name": 'db', + "drop": 'yes', + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 6, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + self.query_cfg = { + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 2, + "query_mode": "taosc", + "specified_table_query": None, + "super_table_query": None + } + + self.table_query = { + "query_interval": 1, + "concurrent": 3, + "sqls": None + } + + self.stable_query = { + "stblname": "stb", + "query_interval": 1, + "threads": 3, + "sqls": None + } + + self.sub_cfg = { + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": None, + "super_table_query": None + } + + self.table_sub = { + "concurrent": 1, + "mode": "sync", + "interval": 10000, + "restart": "yes", + "keepProgress": "yes", + "sqls": None + } + + self.stable_sub = { + "stblname": "stb", + "threads": 1, + "mode": "sync", + "interval": 10000, + "restart": "yes", + "keepProgress": "yes", + "sqls": None + } + + self.stbs = [] + self.stb_template = { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 100, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "batch_create_tbl_num": 5, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 100, + "childtable_limit": 10, + 
"childtable_offset": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 32766, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [{"type": "INT", "count": 1}], + "tags": [{"type": "BIGINT", "count": 1}] + } + + self.tb_query_sql = [] + self.tb_query_sql_template = { + "sql": "select last_row(*) from stb_0 ", + "result": "temp/query_res0.txt" + } + + self.stb_query_sql = [] + self.stb_query_sql_template = { + "sql": "select last_row(ts) from xxxx", + "result": "temp/query_res2.txt" + } + + self.tb_sub_sql = [] + self.tb_sub_sql_template = { + "sql": "select * from stb_0 ;", + "result": "temp/subscribe_res0.txt" + } + + self.stb_sub_sql = [] + self.stb_sub_sql_template = { + "sql": "select * from xxxx where ts > '2021-02-25 11:35:00.000' ;", + "result": "temp/subscribe_res1.txt" + } + + # The following functions are import functions for different dicts and lists + # except import_sql, all other import functions will a dict and overwrite the origional dict + # dict_in: the dict used to overwrite the target + def import_insert_cfg(self, dict_in): + self.insert_cfg = dict_in + + def import_db(self, dict_in): + self.db = dict_in + + def import_stbs(self, dict_in): + self.stbs = dict_in + + def import_query_cfg(self, dict_in): + self.query_cfg = dict_in + + def import_table_query(self, dict_in): + self.table_query = dict_in + + def import_stable_query(self, dict_in): + self.stable_query = dict_in + + def import_sub_cfg(self, dict_in): + self.sub_cfg = dict_in + + def import_table_sub(self, dict_in): + self.table_sub = dict_in + + def import_stable_sub(self, dict_in): + self.stable_sub = dict_in + + def import_sql(self, Sql_in, mode): + """used for importing the sql later used + + Args: + Sql_in (dict): the imported sql dict + mode (str): the sql storing location within TDTaosdemoCfg + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + """ + if mode == 'query_table': + self.tb_query_sql = Sql_in + elif mode == 'query_stable': + self.stb_query_sql = Sql_in + elif mode == 'sub_table': + self.tb_sub_sql = Sql_in + elif mode == 'sub_stable': + self.stb_sub_sql = Sql_in + # import functions end + + # The following functions are alter functions for different dicts + # Args: + # key: the key that is going to be modified + # value: the value of the key that is going to be modified + # if key = 'databases' | "specified_table_query" | "super_table_query"|"sqls" + # value will not be used + + def alter_insert_cfg(self, key, value): + + if key == 'databases': + self.insert_cfg[key] = [ + { + 'dbinfo': self.db, + 'super_tables': self.stbs + } + ] + else: + self.insert_cfg[key] = value + + def alter_db(self, key, value): + self.db[key] = value + + def alter_query_tb(self, key, value): + if key == "sqls": + self.table_query[key] = self.tb_query_sql + else: + self.table_query[key] = value + + def alter_query_stb(self, key, value): + if key == "sqls": + self.stable_query[key] = self.stb_query_sql + else: + self.stable_query[key] = value + + def alter_query_cfg(self, key, value): + if key == "specified_table_query": + self.query_cfg["specified_table_query"] = self.table_query + elif key == "super_table_query": + self.query_cfg["super_table_query"] = self.stable_query + else: + self.query_cfg[key] = value + + def alter_sub_cfg(self, key, value): + if key == "specified_table_query": + 
self.sub_cfg["specified_table_query"] = self.table_sub + elif key == "super_table_query": + self.sub_cfg["super_table_query"] = self.stable_sub + else: + self.sub_cfg[key] = value + + def alter_sub_stb(self, key, value): + if key == "sqls": + self.stable_sub[key] = self.stb_sub_sql + else: + self.stable_sub[key] = value + + def alter_sub_tb(self, key, value): + if key == "sqls": + self.table_sub[key] = self.tb_sub_sql + else: + self.table_sub[key] = value + # alter function ends + + # the following functions are for handling the sql lists + def append_sql_stb(self, target, value): + """for appending sql dict into specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + value (dict): the sql dict going to be appended + """ + if target == 'insert_stbs': + self.stbs.append(value) + elif target == 'query_table': + self.tb_query_sql.append(value) + elif target == 'query_stable': + self.stb_query_sql.append(value) + elif target == 'sub_table': + self.tb_sub_sql.append(value) + elif target == 'sub_stable': + self.stb_sub_sql.append(value) + + def pop_sql_stb(self, target, index): + """for poping a sql dict from specific sql list + + Args: + target (str): the target append list + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + index (int): the sql dict that is going to be popped + """ + if target == 'insert_stbs': + self.stbs.pop(index) + elif target == 'query_table': + self.tb_query_sql.pop(index) + elif target == 'query_stable': + self.stb_query_sql.pop(index) + elif target == 'sub_table': + self.tb_sub_sql.pop(index) + elif target == 'sub_stable': + self.stb_sub_sql.pop(index) + # sql list modification function end + + # The following functions are get functions for different dicts + def get_db(self): + return self.db + + def get_stb(self): + return self.stbs + + def get_insert_cfg(self): + return self.insert_cfg + + def get_query_cfg(self): + return self.query_cfg + + def get_tb_query(self): + return self.table_query + + def get_stb_query(self): + return self.stable_query + + def get_sub_cfg(self): + return self.sub_cfg + + def get_tb_sub(self): + return self.table_sub + + def get_stb_sub(self): + return self.stable_sub + + def get_sql(self, target): + """general get function for all sql lists + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'query_table': + return self.tb_query_sql + elif target == 'query_stable': + return self.stb_query_sql + elif target == 'sub_table': + return self.tb_sub_sql + elif target == 'sub_stable': + return self.stb_sub_sql + + def get_template(self, target): + """general get function for the default sql template + + Args: + target (str): the sql list want to get + format: 'fileType_tableType' + fileType: query, sub + tableType: table, stable + unique: 'insert_stbs' + """ + if target == 'insert_stbs': + return self.stb_template + elif target == 'query_table': + return self.tb_query_sql_template + elif target == 'query_stable': + return self.stb_query_sql_template + elif target == 'sub_table': + return self.tb_sub_sql_template + elif target == 'sub_stable': + return self.stb_sub_sql_template + else: + print(f'did not find {target}') + + # the folloing are the file generation functions + """defalut document: + generator functio for generating taosdemo json file + will 
assemble the dicts and dump the final json + + Args: + pathName (str): the directory wanting the json file to be + fileName (str): the name suffix of the json file + Returns: + str: [pathName]/[filetype]_[filName].json + """ + + def generate_insert_cfg(self, pathName, fileName): + cfgFileName = f'{pathName}/insert_{fileName}.json' + self.alter_insert_cfg('databases', None) + with open(cfgFileName, 'w') as file: + json.dump(self.insert_cfg, file) + return cfgFileName + + def generate_query_cfg(self, pathName, fileName): + cfgFileName = f'{pathName}/query_{fileName}.json' + self.alter_query_tb('sqls', None) + self.alter_query_stb('sqls', None) + self.alter_query_cfg('specified_table_query', None) + self.alter_query_cfg('super_table_query', None) + with open(cfgFileName, 'w') as file: + json.dump(self.query_cfg, file) + return cfgFileName + + def generate_subscribe_cfg(self, pathName, fileName): + cfgFileName = f'{pathName}/subscribe_{fileName}.json' + self.alter_sub_tb('sqls', None) + self.alter_sub_stb('sqls', None) + self.alter_sub_cfg('specified_table_query', None) + self.alter_sub_cfg('super_table_query', None) + with open(cfgFileName, 'w') as file: + json.dump(self.sub_cfg, file) + return cfgFileName + # file generation functions ends + + def drop_cfg_file(self, fileName): + os.remove(f'{fileName}') + + +taosdemoCfg = TDTaosdemoCfg() diff --git a/tests/pytest/wal/sdbComp.py b/tests/pytest/wal/sdbComp.py index c0ac02610f94dbe7c6bfb4ccd7aacd5ada03f205..428fbc9a145c0c3bae4507e33242ff3670c85024 100644 --- a/tests/pytest/wal/sdbComp.py +++ b/tests/pytest/wal/sdbComp.py @@ -26,10 +26,11 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - + def getBuildPath(self): + global selfPath selfPath = os.path.dirname(os.path.realpath(__file__)) - + if ("community" in selfPath): projPath = selfPath[:selfPath.find("community")] else: @@ -42,7 +43,7 @@ class TDTestCase: buildPath = root[:len(root)-len("/build/bin")] break return buildPath - + def run(self): # set path para @@ -53,7 +54,7 @@ class TDTestCase: tdLog.info("taosd found in %s" % buildPath) binPath = buildPath+ "/build/bin/" - testPath = buildPath[:buildPath.find("debug")] + testPath = selfPath+ "/../../../" walFilePath = testPath + "/sim/dnode1/data/mnode_bak/wal/" #new db and insert data @@ -61,9 +62,9 @@ class TDTestCase: os.system("rm -rf %s/sim/dnode1/data/mnode_bak/" % testPath) tdSql.execute("drop database if exists db2") os.system("%staosdemo -f wal/insertDataDb1.json -y " % binPath) - tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists db1") os.system("%staosdemo -f wal/insertDataDb2.json -y " % binPath) - tdSql.execute("drop table if exists db2.stb0") + tdSql.execute("drop table if exists db2.stb0") os.system("%staosdemo -f wal/insertDataDb2Newstab.json -y " % binPath) query_pid1 = int(subprocess.getstatusoutput('ps aux|grep taosd |grep -v "grep"|awk \'{print $2}\'')[1]) print(query_pid1) @@ -71,14 +72,14 @@ class TDTestCase: tdSql.execute("drop table if exists stb1_0") tdSql.execute("drop table if exists stb1_1") tdSql.execute("insert into stb0_0 values(1614218412000,8637,78.861045,'R','bf3')(1614218422000,8637,98.861045,'R','bf3')") - tdSql.execute("alter table db2.stb0 add column col4 int") - tdSql.execute("alter table db2.stb0 drop column col2") - tdSql.execute("alter table db2.stb0 add tag t3 int;") + tdSql.execute("alter table db2.stb0 add column c4 int") + tdSql.execute("alter table db2.stb0 drop column c2") + 
tdSql.execute("alter table db2.stb0 add tag t3 int;") tdSql.execute("alter table db2.stb0 drop tag t1") - tdSql.execute("create table if not exists stb2_0 (ts timestamp, col0 int, col1 float) ") + tdSql.execute("create table if not exists stb2_0 (ts timestamp, c0 int, c1 float) ") tdSql.execute("insert into stb2_0 values(1614218412000,8637,78.861045)") - tdSql.execute("alter table stb2_0 add column col2 binary(4)") - tdSql.execute("alter table stb2_0 drop column col1") + tdSql.execute("alter table stb2_0 add column c2 binary(4)") + tdSql.execute("alter table stb2_0 drop column c1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") # stop taosd and compact wal file @@ -86,9 +87,9 @@ class TDTestCase: sleep(10) os.system("nohup %s/taosd --compact-mnode-wal -c %s/sim/dnode1/cfg/ & " %(binPath,testPath) ) sleep(5) - assert os.path.exists(walFilePath) , "%s is not generated, compact didn't take effect " % walFilePath + assert os.path.exists(walFilePath) , "%s is not generated, compact didn't take effect " % walFilePath - # use new wal file to start taosd + # use new wal file to start taosd tdDnodes.start(1) sleep(5) tdSql.execute("reset query cache") @@ -107,14 +108,14 @@ class TDTestCase: tdSql.checkData(0, 0, 2) tdSql.query("select count(*) from stb2_0") tdSql.checkData(0, 0, 2) - + # delete useless file testcaseFilename = os.path.split(__file__)[-1] os.system("rm -rf ./insert_res.txt") - os.system("rm -rf wal/%s.sql" % testcaseFilename ) - - - + os.system("rm -rf wal/%s.sql" % testcaseFilename) + + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/wal/sdbCompClusterReplica2.py b/tests/pytest/wal/sdbCompClusterReplica2.py index 117da8ca2ffd046e3cf23399174184b1f64e856b..dd5a37515142e4c5505ed442bc44240d5120aa4e 100644 --- a/tests/pytest/wal/sdbCompClusterReplica2.py +++ b/tests/pytest/wal/sdbCompClusterReplica2.py @@ -86,7 +86,16 @@ class TwoClients: tdSql.execute("alter table stb2_0 add column col2 binary(4)") tdSql.execute("alter table stb2_0 drop column col1") tdSql.execute("insert into stb2_0 values(1614218422000,8638,'R')") - + tdSql.execute("drop dnode 2") + sleep(10) + os.system("rm -rf /var/lib/taos/*") + print("clear dnode chenhaoran02'data files") + os.system("nohup /usr/bin/taosd > /dev/null 2>&1 &") + print("start taosd") + sleep(10) + tdSql.execute("reset query cache ;") + tdSql.execute("create dnode chenhaoran02 ;") + # stop taosd and compact wal file os.system("ps -ef |grep taosd |grep -v 'grep' |awk '{print $2}'|xargs kill -2") diff --git a/tests/robust/cluster.sh b/tests/robust/cluster.sh new file mode 100755 index 0000000000000000000000000000000000000000..b291fb4d9ea229c4620004a0305b06a08994a99e --- /dev/null +++ b/tests/robust/cluster.sh @@ -0,0 +1,334 @@ +#!/bin/bash +stty erase '^H' +stty erase '^?' + +# 运行前需要安装expect; apt install expect +# 运行方式: +# ./cluster.sh -c xxx.cfg +# cfg文件内格式: 每行代表一个节点 第一列为external ip、第二列为密码、第三列为用户名、第四列为hostname、第五列为interal ip +# 注意:列与列直接用空格隔开 +# 例子: +# 51.143.97.155 tbase125! root node5 10.2.0.10 +# 20.94.253.116 tbase125! root node2 10.2.0.12 +# 20.94.250.236 tbase125! root node3 10.2.0.13 +# 20.98.72.51 tbase125! 
root node4 10.2.0.14 + +menu(){ + echo "==============================" + echo "-------------Target-----------" + echo "==============================" + echo "1 cluster" + echo "==============================" + echo "2 dnode" + echo "==============================" + echo "3 arbitrator" + echo "==============================" + echo "4 alter replica" + echo "==============================" + echo "5 exit" + echo "==============================" +} + +cluster_menu(){ + echo "==============================" + echo "----------Operation-----------" + echo "==============================" + echo "1 start cluster" + echo "==============================" + echo "2 stop cluster" + echo "==============================" + echo "3 exit" + echo "==============================" +} + +dnode_menu(){ + echo "==============================" + echo "----------Operation-----------" + echo "==============================" + echo "1 start dnode" + echo "==============================" + echo "2 stop dnode" + echo "==============================" + echo "3 add dnode" + echo "==============================" + echo "4 drop dnode" + echo "==============================" + echo "5 exit" + echo "==============================" +} + +arbitrator_menu(){ + echo "==============================" + echo "----------Operation-----------" + echo "==============================" + echo "1 start arbitrator" + echo "==============================" + echo "2 stop arbitrator" + echo "==============================" + echo "3 exit" + echo "==============================" +} + +print_cfg() { + echo "==============================" + echo "-------Configure file---------" + echo "==============================" + echo "Id | IP address | hostname" + i=1 + while read line || [[ -n ${line} ]] + do + arr=($line) + echo " $i | ${arr[0]} | ${arr[3]}" + i=`expr $i + 1`; + done < $1 + echo "==============================" +} + +update(){ + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"systemctl $4 taosd\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mdnode successfully $4 \033[0m" +} + +update_dnode(){ + i=1 + while read line || [[ -n ${line} ]] + do + if [[ $1 -eq $i ]]; then + arr=($line) + update ${arr[0]} ${arr[1]} ${arr[2]} $2 + break; + fi + i=`expr $i + 1`; + done < $3 +} + +add_hosts() { + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"echo $4 $5 >> /etc/hosts\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully add to /etc/hosts in $1\033[0m" +} + +remove_hosts() { + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"sed -i '/$4/d\' /etc/hosts\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully remove from /etc/hosts in $1\033[0m" +} + +remove_varlibtaos() { + expect -c " + set timeout -1; + spawn ssh $3@$1; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect { + *#* { send \"rm -rf /var/lib/taos/*\r\" } + } + expect { + *#* { send \"exit\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully remove /var/lib/taos/* in $1\033[0m" +} + +scp_cfg() { + expect -c " + set timeout -1; + spawn scp 
/etc/taos/taos.cfg $3@$1:/etc/taos; + expect { + *yes/no* { send \"yes\r\"; exp_continue } + *assword:* { send \"$2\r\" } + } + expect eof; + " + echo -e "\033[32mSuccessfully scp /etc/taos/taos.cfg to $1\033[0m" +} + +manage_dnode(){ + i=1 + while read line || [[ -n ${line} ]] + do + if [[ $1 -eq $i ]]; then + arr=($line) + scp_cfg ${arr[0]} ${arr[1]} ${arr[2]} + ip=${arr[0]} + pd=${arr[1]} + user=${arr[2]} + j=1 + while read line2 || [[ -n ${line2} ]] + do + arr2=($line2) + if [[ $1 -ne $j ]]; then + if [ $3 == "create" ];then + echo "$3" + add_hosts $ip $pd $user ${arr2[4]} ${arr2[3]} + else + remove_hosts $ip $pd $user ${arr2[4]} ${arr2[3]} + fi + fi + j=`expr $j + 1`; + done < $2 + remove_varlibtaos $ip $pd $user + if [ $3 == "create" ];then + update $ip $pd $user "start" + else + update $ip $pd $user "stop" + fi + taos -s "$3 dnode \"${arr[3]}:6030\"" + break; + fi + i=`expr $i + 1`; + done < $2 + echo -e "\033[32mSuccessfully $3 dnode id $1\033[0m" +} + +update_cluster() { + while read line || [[ -n ${line} ]] + do + arr=($line) + if [ $1 == "start" ]; then + scp_cfg ${arr[0]} ${arr[1]} ${arr[2]} + fi + update ${arr[0]} ${arr[1]} ${arr[2]} $1 + done < $2 +} + +while : +do + clear + menu + read -p "select mode: " n + case $n in + 1) + clear + print_cfg $2 + cluster_menu + read -p "select operation: " c + case $c in + 1) + update_cluster "start" $2 + break + ;; + 2) + update_cluster "stop" $2 + break + ;; + 3) + break + ;; + esac + ;; + 2) + clear + print_cfg $2 + dnode_menu + read -p "select operation: " d + case $d in + 1) + clear + print_cfg $2 + read -p "select dnode: " id + update_dnode $id "start" $2 + break + ;; + 2) + clear + print_cfg $2 + read -p "select dnode: " id + update_dnode $id "stop" $2 + break + ;; + 3) + clear + print_cfg $2 + read -p "select dnode: " id + manage_dnode $id $2 "create" + break + ;; + 4) + clear + print_cfg $2 + read -p "select dnode: " id + manage_dnode $id $2 "drop" + break + ;; + 5) + break + ;; + esac + ;; + 3) + clear + arbitrator_menu + read -p "select operation: " m + case $m in + 1) + nohup /usr/local/taos/bin/tarbitrator >/dev/null 2>&1 & + echo -e "\033[32mSuccessfully start arbitrator $3 \033[0m" + break + ;; + 2) + var=`ps -ef | grep tarbitrator | awk '{print $2}' | head -n 1` + kill -9 $var + echo -e "\033[32mSuccessfully stop arbitrator $3 \033[0m" + break + ;; + 3) + break + ;; + esac + ;; + 4) + read -p "Enter replica number: " rep + read -p "Enter database name: " db + taos -s "alter database $db replica $rep" + echo -e "\033[32mSuccessfully change $db's replica to $rep \033[0m" + break + ;; + 5) + break + ;; + esac +done diff --git a/tests/robust/makefile b/tests/robust/makefile new file mode 100644 index 0000000000000000000000000000000000000000..c98469498f9de31f423e5d8cf19462f92d3e0b8f --- /dev/null +++ b/tests/robust/makefile @@ -0,0 +1,8 @@ +NAME=robust +CFLAGS = -O3 -g -Wall -Wno-deprecated -fPIC -Wno-unused-result -Wconversion -Wno-char-subscripts -D_REENTRANT -Wno-format -DLINUX -msse4.2 -Wno-unused-function -D_M_X64 -std=gnu99 + +all: + gcc $(CFLAGS) ./$(NAME).c -o $(NAME) -ltaos -lpthread + +clean: + rm $(NAME) \ No newline at end of file diff --git a/tests/robust/monitor.sh b/tests/robust/monitor.sh new file mode 100755 index 0000000000000000000000000000000000000000..4fc1fed75275f87eb184ed34234fdb04a7a5e312 --- /dev/null +++ b/tests/robust/monitor.sh @@ -0,0 +1,8 @@ +while : +do + dlog=`taos -s "show dnodes"` + mlog=`taos -s "show mnodes"` + echo "$dlog" | tee -a dnode.log + echo "$mlog" | tee -a mnode.log + sleep 1s +done 
\ No newline at end of file diff --git a/tests/robust/robust.c b/tests/robust/robust.c new file mode 100644 index 0000000000000000000000000000000000000000..3488ca1b9be8b6cca4d04fe5ba5841f00527b25d --- /dev/null +++ b/tests/robust/robust.c @@ -0,0 +1,260 @@ +#include +#include +#include +#include +#include +#include +#include + +typedef struct { + char querySQL[256]; + int client; + int stable; + int table; + unsigned interval; + int insert; + int create; + int query; +} ProArgs; + +typedef struct { + pthread_t pid; + int threadId; + void* taos; +} ThreadObj; + +static ProArgs arguments; +void parseArg(int argc, char *argv[]); +void create(); +void createImp(void *param); +void start(); +void insertImp(void *param); +void queryImp(void *param); + +int64_t getTimeStampMs() { + struct timeval systemTime; + gettimeofday(&systemTime, NULL); + return (int64_t)systemTime.tv_sec * 1000L + (int64_t)systemTime.tv_usec / 1000; +} + +void parseArg(int argc, char *argv[]) { + arguments.stable = 100000; + arguments.table = 50; + arguments.client = 16; + arguments.interval = 1; + sprintf(arguments.querySQL, "select count(*) from"); + for (int i = 1; i < argc; ++i) { + if (strcmp(argv[i], "-stable") == 0) { + if (i < argc - 1) { + arguments.stable = atoi(argv[++i]); + } else { + fprintf(stderr, "'-stable' requires a parameter, default:%d\n", arguments.stable); + } + } else if (strcmp(argv[i], "-table") == 0) { + if (i < argc - 1) { + arguments.table = atoi(argv[++i]); + } else { + fprintf(stderr, "'-table' requires a parameter, default:%d\n", arguments.table); + } + } else if (strcmp(argv[i], "-client") == 0) { + if (i < argc - 1) { + arguments.client = atoi(argv[++i]); + } else { + fprintf(stderr, "'-client' requires a parameter, default:%d\n", arguments.client); + } + } else if (strcmp(argv[i], "-sql") == 0) { + if (i < argc - 1) { + strcpy(arguments.querySQL,argv[++i]); + } else { + fprintf(stderr, "'-sql' requires a parameter, default:%d\n", arguments.querySQL); + } + } else if (strcmp(argv[i], "-insert") == 0) { + arguments.insert = 1; + } else if (strcmp(argv[i], "-create") == 0) { + arguments.create = 1; + } else if (strcmp(argv[i], "-query") == 0) { + arguments.query = 1; + } + } +} + +void create() { + int64_t st = getTimeStampMs(); + void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); + if (taos == NULL) { + printf("TDengine error: failed to connect\n"); + exit(EXIT_FAILURE); + } + TAOS_RES *result = taos_query(taos, "drop database if exists db"); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + exit(EXIT_FAILURE); + } + taos_free_result(result); + int64_t elapsed = getTimeStampMs() - st; + printf("--- spend %ld ms to drop database db\n", elapsed); + st = getTimeStampMs(); + result = taos_query(taos, "create database if not exists db"); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + exit(EXIT_FAILURE); + } + taos_free_result(result); + elapsed = getTimeStampMs() - st; + printf("--- spend %ld ms to create database db\n", elapsed); + st = getTimeStampMs(); + start(); + printf("--- Spend %ld ms to create %d super tables and each %d tables\n", elapsed, arguments.stable, arguments.table); +} + +void createImp(void *param) { + char command[256] = "\0"; + int sqlLen = 0; + int count = 0; + char *sql = calloc(1, 1024*1024); + ThreadObj 
*pThread = (ThreadObj *)param; + printf("Thread %d start create super table s%d to s%d\n", pThread->threadId, (pThread->threadId - 1) * arguments.stable / arguments.client, pThread->threadId * arguments.stable / arguments.client - 1); + for (int i = (pThread->threadId - 1) * arguments.stable / arguments.client; i < pThread->threadId * arguments.stable / arguments.client; i++) { + sprintf(command, "create table if not exists db.s%d(ts timestamp, c1 int, c2 int, c3 int) TAGS (id int)", i); + TAOS_RES *result = taos_query(pThread->taos, command); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql:%s, reason:%s\n", __LINE__, command, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + result = taos_query(pThread->taos, "use db"); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + sqlLen = sprintf(sql, "create table if not exists "); + count = 0; + for (int j = 0; j < arguments.table; j++) { + sqlLen += sprintf(sql + sqlLen, " s%d_%d USING s%d TAGS (%d)", i, j, i, j); + if ((j + 1) % arguments.table == 0) { + result = taos_query(pThread->taos, sql); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + printf("Thread %d created table s%d_%d to s%d_%d\n", pThread->threadId, i, count, i, j); + count = j; + sqlLen = sprintf(sql, "create table if not exists "); + } + } + } +} + +void start() { + ThreadObj *threads = calloc((size_t)arguments.client, sizeof(ThreadObj)); + for (int i = 0; i < arguments.client; i++) { + ThreadObj *pthread = threads + i; + pthread_attr_t thattr; + pthread->threadId = i + 1; + pthread->taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + if (arguments.create) { + pthread_create(&pthread->pid, &thattr, (void *(*)(void *))createImp, pthread); + } else if (arguments.insert) { + pthread_create(&pthread->pid, &thattr, (void *(*)(void *))insertImp, pthread); + } else if (arguments.query) { + pthread_create(&pthread->pid, &thattr, (void *(*)(void *))queryImp, pthread); + } + } + for (int i = 0; i < arguments.client; i++) { + pthread_join(threads[i].pid, NULL); + } + free(threads); +} + +void insertImp(void *param) { + int count = 0; + ThreadObj *pThread = (ThreadObj *)param; + printf("Thread %d start insert data\n", pThread->threadId); + int sqlLen = 0; + char *sql = calloc(1, 1024*1024); + TAOS_RES *result = taos_query(pThread->taos, "use db"); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + int64_t time = getTimeStampMs(); + while (true) { + sqlLen = sprintf(sql, "insert into"); + for (int i = (pThread->threadId - 1) * arguments.stable / arguments.client; i < pThread->threadId * arguments.stable / arguments.client; i++) { + for (int j = 0; j < arguments.table; j++) { + sqlLen += sprintf(sql + sqlLen, " s%d_%d values (%ld, %d, %d, %d)", i, j, time, rand(), rand(), rand()); + count++; + if ( (1048576 - sqlLen) < 49151 || i == (pThread->threadId * arguments.stable / arguments.client - 1)) { + result = 
taos_query(pThread->taos, sql); + printf("Thread %d already insert %d rows\n", pThread->threadId, count); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + sqlLen = sprintf(sql, "insert into"); + } + } + } + time += 1000*arguments.interval; + } +} + +void queryImp(void *param) { + ThreadObj *pThread = (ThreadObj *)param; + printf("Thread %d start query\n", pThread->threadId); + int sqlLen = 0; + char *sql = calloc(1, 1024*1024); + TAOS_RES *result = taos_query(pThread->taos, "use db"); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql, reason:%s\n", __LINE__, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); + while (true) { + for (int i = (pThread->threadId - 1) * arguments.stable / arguments.client; i < pThread->threadId * arguments.stable / arguments.client; i++) { + sqlLen = sprintf(sql, arguments.querySQL); + sqlLen += sprintf(sql + sqlLen, " s%d", i); + result = taos_query(pThread->taos, sql); + if (result == NULL || taos_errno(result) != 0) { + printf("In line:%d, failed to execute sql: %s, reason:%s\n", __LINE__, sql, taos_errstr(result)); + taos_free_result(result); + return; + } + int num_fields = taos_field_count(result); + TAOS_FIELD *fields = taos_fetch_fields(result); + TAOS_ROW row = NULL; + while ((row = taos_fetch_row(result))) { + char temp[16000] = {0}; + int len = taos_print_row(temp, row, fields, num_fields); + len += sprintf(temp + len, "\n"); + printf("Thread %d query result: %s\n", pThread->threadId, temp); + } + taos_free_result(result); + sleep(arguments.interval); + } + } + +} + +int main(int argc, char *argv[]) { + parseArg(argc, argv); + if (arguments.insert || arguments.query || arguments.create) { + start(); + } else { + printf("choose one argument: \n1) -create\n2) -insert\n3) -query\n"); + } +} \ No newline at end of file diff --git a/tests/script/api/makefile b/tests/script/api/makefile index 5eeb1342887afc2c4f920aa673466eef7f7ae510..7595594cbf2572623dd18648c8c4fa8e65dd966a 100644 --- a/tests/script/api/makefile +++ b/tests/script/api/makefile @@ -13,7 +13,9 @@ all: $(TARGET) exe: gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS) gcc $(CFLAGS) ./stmtBatchTest.c -o $(ROOT)stmtBatchTest $(LFLAGS) + gcc $(CFLAGS) ./stmtTest.c -o $(ROOT)stmtTest $(LFLAGS) clean: rm $(ROOT)batchprepare rm $(ROOT)stmtBatchTest + rm $(ROOT)stmtTest diff --git a/tests/script/api/stmtTest.c b/tests/script/api/stmtTest.c new file mode 100644 index 0000000000000000000000000000000000000000..9595fe5b2d72e3291959828badf45abc2f7cb71e --- /dev/null +++ b/tests/script/api/stmtTest.c @@ -0,0 +1,238 @@ +#include +#include +#include +#include "taos.h" +#include +#include +#include + +#define PRINT_ERROR printf("\033[31m"); +#define PRINT_SUCCESS printf("\033[32m"); + +void execute_simple_sql(void *taos, char *sql) { + TAOS_RES *result = taos_query(taos, sql); + if ( result == NULL || taos_errno(result) != 0) { + PRINT_ERROR + printf("failed to %s, Reason: %s\n", sql, taos_errstr(result)); + taos_free_result(result); + exit(EXIT_FAILURE); + } + taos_free_result(result); + PRINT_SUCCESS + printf("Successfully %s\n", sql); +} + +void check_result(TAOS *taos, int id, int expected) { + char sql[256] = {0}; + sprintf(sql, "select * from t%d", id); + TAOS_RES *result; + result = taos_query(taos, sql); + if ( result == NULL || 
taos_errno(result) != 0) { + PRINT_ERROR + printf("failed to %s, Reason: %s\n", sql, taos_errstr(result)); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully execute %s\n", sql); + int rows = 0; + TAOS_ROW row; + while ((row = taos_fetch_row(result))) { + rows++; + } + if (rows == expected) { + PRINT_SUCCESS + printf("table t%d's %d rows are fetched as expected\n", id, rows); + } else { + PRINT_ERROR + printf("table t%d's %d rows are fetched but %d expected\n", id, rows, expected); + } + taos_free_result(result); +} + +int main(int argc, char *argv[]) { + void *taos = taos_connect("127.0.0.1", "root", "taosdata", NULL, 0); + if (taos == NULL) { + PRINT_ERROR + printf("TDengine error: failed to connect\n"); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully connected to TDengine\n"); + + execute_simple_sql(taos, "drop database if exists test"); + execute_simple_sql(taos, "create database test"); + execute_simple_sql(taos, "use test"); + execute_simple_sql(taos, "create table super(ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 binary(8), c6 smallint, c7 tinyint, c8 bool, c9 nchar(8), c10 timestamp) tags (t1 int, t2 bigint, t3 float, t4 double, t5 binary(8), t6 smallint, t7 tinyint, t8 bool, t9 nchar(8))"); + + char *sql = calloc(1, 1024*1024); + int sqlLen = 0; + sqlLen = sprintf(sql, "create table"); + for (int i = 0; i < 10; i++) { + sqlLen += sprintf(sql + sqlLen, " t%d using super tags (%d, 2147483648, 0.1, 0.000000001, 'abcdefgh', 32767, 127, 1, '一二三四五六七八')", i, i); + } + execute_simple_sql(taos, sql); + + + int code = taos_load_table_info(taos, "t0,t1,t2,t3,t4,t5,t6,t7,t8,t9"); + if (code != 0) { + PRINT_ERROR + printf("failed to load table info: 0x%08x\n", code); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully load table info\n"); + + TAOS_STMT *stmt = taos_stmt_init(taos); + if (stmt == NULL) { + PRINT_ERROR + printf("TDengine error: failed to init taos_stmt\n"); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully init taos_stmt\n"); + + uintptr_t c10len = 0; + struct { + int64_t c1; + int32_t c2; + int64_t c3; + float c4; + double c5; + unsigned char c6[8]; + int16_t c7; + int8_t c8; + int8_t c9; + char c10[32]; + } v = {0}; + TAOS_BIND params[11]; + params[0].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[0].buffer_length = sizeof(v.c1); + params[0].buffer = &v.c1; + params[0].length = ¶ms[0].buffer_length; + params[0].is_null = NULL; + + params[1].buffer_type = TSDB_DATA_TYPE_INT; + params[1].buffer_length = sizeof(v.c2); + params[1].buffer = &v.c2; + params[1].length = ¶ms[1].buffer_length; + params[1].is_null = NULL; + + params[2].buffer_type = TSDB_DATA_TYPE_BIGINT; + params[2].buffer_length = sizeof(v.c3); + params[2].buffer = &v.c3; + params[2].length = ¶ms[2].buffer_length; + params[2].is_null = NULL; + + params[3].buffer_type = TSDB_DATA_TYPE_FLOAT; + params[3].buffer_length = sizeof(v.c4); + params[3].buffer = &v.c4; + params[3].length = ¶ms[3].buffer_length; + params[3].is_null = NULL; + + params[4].buffer_type = TSDB_DATA_TYPE_DOUBLE; + params[4].buffer_length = sizeof(v.c5); + params[4].buffer = &v.c5; + params[4].length = ¶ms[4].buffer_length; + params[4].is_null = NULL; + + params[5].buffer_type = TSDB_DATA_TYPE_BINARY; + params[5].buffer_length = sizeof(v.c6); + params[5].buffer = &v.c6; + params[5].length = ¶ms[5].buffer_length; + params[5].is_null = NULL; + + params[6].buffer_type = TSDB_DATA_TYPE_SMALLINT; + params[6].buffer_length = sizeof(v.c7); + params[6].buffer = &v.c7; + params[6].length 
= &params[6].buffer_length; + params[6].is_null = NULL; + + params[7].buffer_type = TSDB_DATA_TYPE_TINYINT; + params[7].buffer_length = sizeof(v.c8); + params[7].buffer = &v.c8; + params[7].length = &params[7].buffer_length; + params[7].is_null = NULL; + + params[8].buffer_type = TSDB_DATA_TYPE_BOOL; + params[8].buffer_length = sizeof(v.c9); + params[8].buffer = &v.c9; + params[8].length = &params[8].buffer_length; + params[8].is_null = NULL; + + params[9].buffer_type = TSDB_DATA_TYPE_NCHAR; + params[9].buffer_length = sizeof(v.c10); + params[9].buffer = &v.c10; + params[9].length = &c10len; + params[9].is_null = NULL; + + params[10].buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + params[10].buffer_length = sizeof(v.c1); + params[10].buffer = &v.c1; + params[10].length = &params[10].buffer_length; + params[10].is_null = NULL; + + char *stmt_sql = "insert into ? values (?,?,?,?,?,?,?,?,?,?,?)"; + code = taos_stmt_prepare(stmt, stmt_sql, 0); + if (code != 0){ + PRINT_ERROR + printf("failed to execute taos_stmt_prepare. code:0x%x\n", code); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully execute taos_stmt_prepare\n"); + + for (int i = 0; i < 10; i++) { + char buf[32]; + sprintf(buf, "t%d", i); + if (i == 0) { + code = taos_stmt_set_tbname(stmt, buf); + if (code != 0) { + PRINT_ERROR + printf("failed to execute taos_stmt_set_tbname. code:0x%x\n", code); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully execute taos_stmt_set_tbname\n"); + } else { + code = taos_stmt_set_sub_tbname(stmt, buf); + if (code != 0) { + PRINT_ERROR + printf("failed to execute taos_stmt_set_sub_tbname. code:0x%x\n", code); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully execute taos_stmt_set_sub_tbname\n"); + } + + v.c1 = (int64_t)1591060628000; + v.c2 = (int32_t)2147483647; + v.c3 = (int64_t)2147483648; + v.c4 = (float)0.1; + v.c5 = (double)0.000000001; + for (int j = 0; j < sizeof(v.c6); j++) { + v.c6[j] = (char)('a'); + } + v.c7 = 32767; + v.c8 = 127; + v.c9 = 1; + strcpy(v.c10, "一二三四五六七八"); + c10len=strlen(v.c10); + taos_stmt_bind_param(stmt, params); + taos_stmt_add_batch(stmt); + } + + if (taos_stmt_execute(stmt) != 0) { + PRINT_ERROR + printf("failed to execute insert statement.\n"); + exit(EXIT_FAILURE); + } + PRINT_SUCCESS + printf("Successfully execute insert statement.\n"); + + taos_stmt_close(stmt); + for (int i = 0; i < 10; i++) { + check_result(taos, i, 1); + } + + return 0; +} diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim index 937265753323a41e51ddf9a3be0061c66b6c586a..5b5a911558b5eb1d27f34fb10590a3b9ff52658c 100644 --- a/tests/script/fullGeneralSuite.sim +++ b/tests/script/fullGeneralSuite.sim @@ -105,6 +105,7 @@ run general/parser/import_commit2.sim run general/parser/import_commit3.sim run general/parser/insert_tb.sim run general/parser/first_last.sim +run general/parser/line_insert.sim #unsupport run general/parser/import_file.sim run general/parser/lastrow.sim run general/parser/nchar.sim @@ -131,6 +132,7 @@ run general/parser/join.sim run general/parser/join_multivnode.sim run general/parser/select_with_tags.sim run general/parser/groupby.sim +run general/parser/top_groupby.sim run general/parser/tags_dynamically_specifiy.sim run general/parser/set_tag_vals.sim #unsupport run general/parser/repeatAlter.sim diff --git a/tests/script/general/db/alter_option.sim b/tests/script/general/db/alter_option.sim index c20a96fd1bd18ba911cce02859d7265d0294b76b..36f4c0e7dcfca27f77b0e701adff47e06f9d1bb0 100644 ---
a/tests/script/general/db/alter_option.sim +++ b/tests/script/general/db/alter_option.sim @@ -198,29 +198,25 @@ if $data12_db != 1 then return -1 endi -sql alter database db wal 1 -sql show databases -print wal $data12_db -if $data12_db != 1 then - return -1 -endi +sql_error alter database db wal 1 + -sql alter database db wal 1 -sql alter database db wal 2 -sql alter database db wal 1 -sql alter database db wal 2 -sql alter database db wal 0 +sql_error alter database db wal 1 +sql_error alter database db wal 2 +sql_error alter database db wal 1 +sql_error alter database db wal 2 +sql_error alter database db wal 0 sql_error alter database db wal 3 sql_error alter database db wal 4 sql_error alter database db wal -1 sql_error alter database db wal 1000 print ============== step fsync -sql alter database db fsync 0 -sql alter database db fsync 1 -sql alter database db fsync 3600 -sql alter database db fsync 18000 -sql alter database db fsync 180000 +sql_error alter database db fsync 0 +sql_error alter database db fsync 1 +sql_error alter database db fsync 3600 +sql_error alter database db fsync 18000 +sql_error alter database db fsync 180000 sql_error alter database db fsync 180001 sql_error alter database db fsync -1 diff --git a/tests/script/general/db/topic1.sim b/tests/script/general/db/topic1.sim index 4939e5a0e2274e8ab26465fe04ca99a79f9f9ce0..16399731208915602fea6dcd123fb85edf2e085b 100644 --- a/tests/script/general/db/topic1.sim +++ b/tests/script/general/db/topic1.sim @@ -495,18 +495,13 @@ if $data12_db != 1 then endi sql_error alter topic db wal 1 -sql alter database db wal 1 -sql show databases -print wal $data12_db -if $data12_db != 1 then - return -1 -endi +sql_error alter database db wal 1 -sql alter database db wal 1 -sql alter database db wal 2 -sql alter database db wal 1 -sql alter database db wal 2 -sql alter database db wal 0 +sql_error alter database db wal 1 +sql_error alter database db wal 2 +sql_error alter database db wal 1 +sql_error alter database db wal 2 +sql_error alter database db wal 0 sql_error alter database db wal 3 sql_error alter database db wal 4 sql_error alter database db wal -1 @@ -523,11 +518,11 @@ sql_error alter topic db wal -1 sql_error alter topic db wal 1000 print ============== step fsync -sql alter database db fsync 0 -sql alter database db fsync 1 -sql alter database db fsync 3600 -sql alter database db fsync 18000 -sql alter database db fsync 180000 +sql_error alter database db fsync 0 +sql_error alter database db fsync 1 +sql_error alter database db fsync 3600 +sql_error alter database db fsync 18000 +sql_error alter database db fsync 180000 sql_error alter database db fsync 180001 sql_error alter database db fsync -1 @@ -615,17 +610,6 @@ if $rows != 1 then return -1 endi -sql alter database d1 fsync 0 -sql show topics; -if $rows != 0 then - return -1 -endi - -sql show databases; -if $rows != 1 then - return -1 -endi - sql drop database d1 sql show topics; if $rows != 0 then @@ -649,17 +633,6 @@ if $rows != 1 then return -1 endi -sql alter database d1 fsync 0 -sql show topics; -if $rows != 1 then - return -1 -endi - -sql show databases; -if $rows != 1 then - return -1 -endi - sql drop database d1 sql show topics; if $rows != 0 then diff --git a/tests/script/general/field/bigint.sim b/tests/script/general/field/bigint.sim index 3bb120c6410a2ea2e9c671bb4aca56fc8014124f..538f966c49cc3eefb8516c4bac4a49b9efd78d37 100644 --- a/tests/script/general/field/bigint.sim +++ b/tests/script/general/field/bigint.sim @@ -159,4 +159,4 @@ if $rows != 0 then 
return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/field/unsigined_bigint.sim b/tests/script/general/field/unsigined_bigint.sim new file mode 100644 index 0000000000000000000000000000000000000000..1cfe8ad15b31f38d2fc72d429a0c9af2cb5732fb --- /dev/null +++ b/tests/script/general/field/unsigined_bigint.sim @@ -0,0 +1,166 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start + +sleep 2000 +sql connect +print ======================== dnode1 start + +$dbPrefix = db +$tbPrefix = tb +$mtPrefix = st +$tbNum = 10 +$rowNum = 20 +$totalNum = 200 + +print =============== step1 +$i = 0 +$db = $dbPrefix . $i +$mt = $mtPrefix . $i + +sql create database $db +sql use $db +sql create table $mt (ts timestamp, tbcol bigint unsigned) TAGS(tgcol bigint unsigned) + +$i = 0 +while $i < 5 + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( 0 ) + sql create table $tb using $mt tags( -111 ) + $x = 0 + while $x < $rowNum + $ms = $x . m + sql insert into $tb values (now + $ms , 0 ) + $x = $x + 1 + sql_error insert into $tb values (now + $ms , -10) + sql_error insert into $tb values (now + $ms , -1000) + sql_error insert into $tb values (now + $ms , -10000000) + endw + $i = $i + 1 +endw +while $i < 10 + $tb = $tbPrefix . $i + sql create table $tb using $mt tags( 1 ) + $x = 0 + while $x < $rowNum + $ms = $x . m + sql insert into $tb values (now + $ms , 1 ) + $x = $x + 1 + endw + $i = $i + 1 +endw + +print =============== step2 +sql select * from $mt where tbcol = 0 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol <> 0 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol = 1 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol <> 1 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol = 1 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol <> 1 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol = 0 +if $rows != 100 then + return -1 +endi +sql select * from $mt where tbcol <> 0 +if $rows != 100 then + return -1 +endi + +print =============== step3 +sql select * from $mt where ts > now + 4m and tbcol = 1 +if $rows != 75 then + return -1 +endi +sql select * from $mt where ts > now + 4m and tbcol <> 1 +if $rows != 75 then + return -1 +endi +sql select * from $mt where ts < now + 4m and tbcol = 0 +if $rows != 25 then + return -1 +endi +sql select * from $mt where ts < now + 4m and tbcol <> 0 +if $rows != 25 then + return -1 +endi +sql select * from $mt where ts <= now + 4m and tbcol = 0 +if $rows != 25 then + return -1 +endi +sql select * from $mt where ts <= now + 4m and tbcol <> 0 +if $rows != 25 then + return -1 +endi +sql select * from $mt where ts > now + 4m and ts < now + 5m and tbcol <> 0 +if $rows != 5 then + return -1 +endi +sql select * from $mt where ts > now + 4m and tbcol <> 0 and ts < now + 5m +if $rows != 5 then + return -1 +endi + +print =============== step4 +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 200 then + return -1 +endi + +print =============== step5 +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 +print $data00 $data01 $data02 
$data03 $data04 $data05 $data06 +if $data00 != 100 then + return -1 +endi + +print =============== step6 +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 +if $data00 != 100 then + print expect 100, actual $data00 + return -1 +endi + +print =============== step7 +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where ts < now + 4m and tbcol = 1 group by tgcol +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data00 != 25 then + return -1 +endi + +print =============== step8 +sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 interval(1d) group by tgcol order by tgcol desc +print $data00 $data01 $data02 $data03 $data04 $data05 $data06 +if $data01 != 100 then + return -1 +endi + +print =============== clear +sql drop database $db +sql show databases +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/http/grafana_bug.sim b/tests/script/general/http/grafana_bug.sim index 0816e88f3fdfaf5a6292f746c66ecb562bee177e..ed184e17c6deceaa118c512a8ccf5f5b4df3ffbc 100644 --- a/tests/script/general/http/grafana_bug.sim +++ b/tests/script/general/http/grafana_bug.sim @@ -247,4 +247,25 @@ if $system_content != @[{"refId":"A","target":"{val1:nil, val2:nil}","datapoints return -1 endi +sql create table tt (ts timestamp ,i int) tags(j binary(20),k binary(20)); +sql insert into t1 using tt tags('jnetworki','t1') values('2020-01-01 00:00:00.000',1)('2020-01-01 00:01:00.000',2)('2020-01-01 00:02:00.000',3)('2020-01-01 00:03:00.000',4)('2020-01-01 00:04:00.000',5); + +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%network%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query +print step1-> $system_content +if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then + return -1 +endi + +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027jnetwo%\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query +print step1-> $system_content +if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then + return -1 +endi + +system_content curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d '[ {"refId":"A","alias":"","sql":"select max(i) from db.tt where j like \u0027%networki\u0027 and ts >= \u00272020-01-01 00:00:00.000\u0027 and ts < \u00272020-01-01 00:05:00.000\u0027 interval(5m) group by k "} ]' 127.0.0.1:7111/grafana/query +print step1-> $system_content +if $system_content != @[{"refId":"A","target":"{k:t1}","datapoints":[[5,1577808000000]]}]@ then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/col_arithmetic_query.sim b/tests/script/general/parser/col_arithmetic_query.sim index 
191f56fcfb7d13caf1cb981f518e8bdd439c42b3..17ae6cfd6b8b5636101e67e8d99f6999e50a06a5 100644 --- a/tests/script/general/parser/col_arithmetic_query.sim +++ b/tests/script/general/parser/col_arithmetic_query.sim @@ -193,7 +193,7 @@ if $data02 != 0.000000000 then return -1 endi -if $data03 != 0.000000000 then +if $data03 != NULL then return -1 endi @@ -444,7 +444,7 @@ if $data02 != 8.077777778 then return -1 endi -if $data03 != inf then +if $data03 != NULL then return -1 endi diff --git a/tests/script/general/parser/create_db.sim b/tests/script/general/parser/create_db.sim index 7881060ad178fe3b3f7a9ea0530a1ac517264a3e..a62a45e0233f5db81bc7565c856f08483c9d8aee 100644 --- a/tests/script/general/parser/create_db.sim +++ b/tests/script/general/parser/create_db.sim @@ -237,7 +237,42 @@ sql_error create database $db ctime 29 sql_error create database $db ctime 40961 # wal {0, 2} -#sql_error create database $db wal 0 +sql create database testwal wal 0 +sql show databases +if $rows != 1 then + return -1 +endi + +sql show databases +print wallevel $data12_testwal +if $data12_testwal != 0 then + return -1 +endi +sql drop database testwal + +sql create database testwal wal 1 +sql show databases +if $rows != 1 then + return -1 +endi +sql show databases +print wallevel $data12_testwal +if $data12_testwal != 1 then + return -1 +endi +sql drop database testwal + +sql create database testwal wal 2 +sql show databases +if $rows != 1 then + return -1 +endi +print wallevel $data12_testwal +if $data12_testwal != 2 then + return -1 +endi +sql drop database testwal + sql_error create database $db wal -1 sql_error create database $db wal 3 diff --git a/tests/script/general/parser/create_mt.sim b/tests/script/general/parser/create_mt.sim index ae1629dce9861d7540bda4b6d4014e32ce2ce52d..c606ba99ecb5f9060de9bb8f52af916c2d2b96b6 100644 --- a/tests/script/general/parser/create_mt.sim +++ b/tests/script/general/parser/create_mt.sim @@ -99,7 +99,7 @@ $i_binary2 = varchar(20) # illegal string $i_bool = boolean $nchar = nchar # nchar with unspecified length print ========== create_mt.sim case4: illegal data types in tags test -sql_error create table $mt (ts timestamp, col int) tags (tag1 timestamp ) +##sql_error create table $mt (ts timestamp, col int) tags (tag1 timestamp ) sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_ts ) sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_binary ) sql_error create table $mt (ts timestamp, col int) tags (tag1 $i_bigint ) @@ -253,4 +253,4 @@ if $rows != 0 then return -1 endi -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/fill_us.sim b/tests/script/general/parser/fill_us.sim index 8cd2c333475a0d0140eb5c0c8ee0fa4186fccc97..762413d0a1e975c778ccd3d31e54e0f2d347cef2 100644 --- a/tests/script/general/parser/fill_us.sim +++ b/tests/script/general/parser/fill_us.sim @@ -959,14 +959,14 @@ endi if $data31 != 9.000000000 then return -1 endi -if $data41 != null then +if $data41 != NULL then print ===== $data41 return -1 endi if $data51 != 16.000000000 then return -1 endi -if $data61 != null then +if $data61 != NULL then print ===== $data61 return -1 endi diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index ad900b92e01ca46b7f4fb8afbd30a5a408bfb7ac..a3470b1763e30b8c6c6baa7b897de40910a84743 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -817,6 +817,9 @@ 
print ====================> TODO stddev + normal column filter print ====================> irate +sql_error select irate(f1) from st1; +sql select irate(f1) from st1 group by tbname; + sql select irate(k) from t1 if $rows != 1 then return -1 @@ -1073,6 +1076,12 @@ sql insert into t0 values('2020-1-1 1:3:9', 9); sql insert into t0 values('2020-1-1 1:4:10', 10); sql insert into t1 values('2020-1-1 1:1:2', 2); +print ===========================>td-4739 +sql select diff(val) from (select derivative(k, 1s, 0) val from t1); +if $rows != 0 then + return -1 +endi + sql insert into t1 values('2020-1-1 1:1:4', 20); sql insert into t1 values('2020-1-1 1:1:6', 200); sql insert into t1 values('2020-1-1 1:1:8', 2000); @@ -1123,4 +1132,19 @@ if $data92 != t1 then return -1 endi +print =========================>TD-5190 +sql select stddev(f1) from st1 where ts>'2021-07-01 1:1:1' and ts<'2021-07-30 00:00:00' interval(1d) fill(NULL); +if $rows != 29 then + return -1 +endi + +if $data00 != @21-07-01 00:00:00.000@ then + return -1 +endi + +if $data01 != NULL then + return -1 +endi + + sql select derivative(test_column_alias_name, 1s, 0) from (select avg(k) test_column_alias_name from t1 interval(1s)); diff --git a/tests/script/general/parser/gendata.sh b/tests/script/general/parser/gendata.sh index f56fdc34680f6fda559136a68f34ad38ed406bbd..b2074147ca0a0a4483d19192b45d875ad24a1541 100755 --- a/tests/script/general/parser/gendata.sh +++ b/tests/script/general/parser/gendata.sh @@ -4,3 +4,5 @@ Cur_Dir=$(pwd) echo $Cur_Dir echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql +echo "'2020-1-2 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql +echo "'2020-1-3 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index 124e76e85cb7451fe5f0850985c5b30f90587fee..1fe19714bbd516c2e8938ce1290f04f8d2053839 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -654,53 +654,142 @@ if $data31 != @20-03-27 05:10:19.000@ then return -1 endi -#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; -#if $rows != 40 then -# return -1 -#endi -# -#if $data01 != 1.000000000 then -# return -1 -#endi -#if $data02 != t1 then -# return -1 -#endi -#if $data03 != 1 then -# return -1 -#endi -#if $data04 != 1 then -# return -1 -#endi -# -#if $data11 != 1.000000000 then -# return -1 -#endi -#if $data12 != t1 then -# return -1 -#endi -#if $data13 != 1 then -# return -1 -#endi -#if $data14 != 1 then -# return -1 -#endi -# -#sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; -#if $rows != 2 then -# return -1 -#endi -# -#if $data11 != 1.000000000 then -# return -1 -#endi -#if $data12 != t2 then -# return -1 -#endi -#if $data13 != 1 then -# return -1 -#endi -#if $data14 != 2 then -# return -1 -#endi +print ===============> +sql select stddev(c),c from st where t2=1 or t2=2 group by c; +if $rows != 4 then + return -1 +endi + +if $data00 != 0.000000000 then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data10 != 0.000000000 then + return -1 +endi + +if $data11 != 2 then + return -1 +endi + +if $data20 != 0.000000000 then + 
return -1 +endi + +if $data21 != 3 then + return -1 +endi + +if $data30 != 0.000000000 then + return -1 +endi + +if $data31 != 4 then + return -1 +endi + +sql_error select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,c; +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2; +if $rows != 40 then + return -1 +endi + +if $data01 != 1.000000000 then + return -1 +endi +if $data02 != t1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi +if $data04 != 1 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t1 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 1 then + return -1 +endi + +sql select irate(c) from st where t1="1" and ts >= '2020-03-27 04:11:17.732' and ts < '2020-03-27 05:11:17.732' interval(1m) sliding(15s) group by tbname,t1,t2 limit 1; +if $rows != 2 then + return -1 +endi + +if $data11 != 1.000000000 then + return -1 +endi +if $data12 != t2 then + return -1 +endi +if $data13 != 1 then + return -1 +endi +if $data14 != 2 then + return -1 +endi + +sql create table m1 (ts timestamp, k int, f1 int) tags(a int); +sql create table tm0 using m1 tags(0); +sql create table tm1 using m1 tags(1); + +sql insert into tm0 values('2020-1-1 1:1:1', 1, 10); +sql insert into tm0 values('2020-1-1 1:1:2', 1, 20); +sql insert into tm1 values('2020-2-1 1:1:1', 2, 10); +sql insert into tm1 values('2020-2-1 1:1:2', 2, 20); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 100 +system sh/exec.sh -n dnode1 -s start +sleep 100 + +sql connect +sleep 100 +sql use group_db0; + +print =========================>TD-4894 +sql select count(*),k from m1 group by k; +if $rows != 2 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data10 != 2 then + return -1 +endi + +if $data11 != 2 then + return -1 +endi + +sql_error select count(*) from m1 group by tbname,k,f1; +sql_error select count(*) from m1 group by tbname,k,a; +sql_error select count(*) from m1 group by k, tbname; +sql_error select count(*) from m1 group by k,f1; +sql_error select count(*) from tm0 group by tbname; +sql_error select count(*) from tm0 group by a; +sql_error select count(*) from tm0 group by k,f1; + +sql_error select count(*),f1 from m1 group by tbname,k; system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/having.sim b/tests/script/general/parser/having.sim index ddafdd73293d75bc99380969d98c7fb986420a38..e063333853e04faf1a7f4988b6dd1f11207aee5d 100644 --- a/tests/script/general/parser/having.sim +++ b/tests/script/general/parser/having.sim @@ -1,6 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 system sh/exec.sh -n dnode1 -s start @@ -1835,5 +1835,8 @@ if $data04 != 1 then endi sql_error select top(f1,2) from tb1 group by f1 having count(f1) > 0; +sql_error select count(*) from tb1 group by f1 having last(*) > 0; + +print bug for select count(*) k from tb1 group by f1 having k > 0; system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/having_child.sim b/tests/script/general/parser/having_child.sim index 
a38db3fe44e8857ba646128a856371468d723b2b..0fe5448869a5720a62550a88981114e737e4965b 100644 --- a/tests/script/general/parser/having_child.sim +++ b/tests/script/general/parser/having_child.sim @@ -306,41 +306,11 @@ endi sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having twa(f1) > 0; -sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3; -if $rows != 1 then - return -1 -endi -if $data00 != 4.000000000 then - return -1 -endi -if $data01 != 2 then - return -1 -endi -if $data02 != 8 then - return -1 -endi -if $data03 != 4.000000000 then - return -1 -endi +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having twa(f1) > 3; sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by tbname having sum(f1) > 0; -sql select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4; -if $rows != 1 then - return -1 -endi -if $data00 != 2.000000000 then - return -1 -endi -if $data01 != 2 then - return -1 -endi -if $data02 != 4 then - return -1 -endi -if $data03 != 2.000000000 then - return -1 -endi +sql_error select avg(f1),count(f1),sum(f1),twa(f1) from tb1 group by f1 having sum(f1) = 4; sql select avg(f1),count(f1),sum(f1) from tb1 group by f1 having sum(f1) > 0; if $rows != 4 then diff --git a/tests/script/general/parser/import_file.sim b/tests/script/general/parser/import_file.sim index e9f0f1ed085cc75238681dc08b9601a8d591f6c4..cf11194ba7c3b805725a665c6f92d6bb465b9e4e 100644 --- a/tests/script/general/parser/import_file.sim +++ b/tests/script/general/parser/import_file.sim @@ -15,6 +15,8 @@ $inFileName = '~/data.csv' $numOfRows = 10000 system general/parser/gendata.sh +sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) tags(a int, b binary(12)); + sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) print ====== create tables success, starting import data @@ -23,13 +25,48 @@ sql import into tbx file '~/data.sql' sql select count(*) from tbx if $rows != 1 then + print expect 1, actual: $rows + return -1 +endi + +if $data00 != 3 then + return -1 +endi + +sql drop table tbx; + +sql insert into tbx using stbx tags(1,'abc') file '~/data.sql'; +sql insert into tbx using stbx tags(1,'abc') file '~/data.sql'; + +sql select count(*) from tbx +if $rows != 1 then + return -1 +endi + +if $data00 != 3 then return -1 endi -#if $data00 != $numOfRows then -# print "expect: $numOfRows, act: $data00" -# return -1 -#endi +sql drop table tbx; +sql insert into tbx using stbx(b) tags('abcf') file '~/data.sql'; + +sql select ts,a,b from tbx; +if $rows != 3 then + return -1 +endi + +if $data00 != @20-01-01 01:01:01.000@ then + print expect 20-01-01 01:01:01.000 , actual: $data00 + return -1 +endi + +if $data01 != NULL then + return -1 +endi + +if $data02 != @abcf@ then + return -1 +endi system rm -f ~/data.sql diff --git a/tests/script/general/parser/last_cache.sim b/tests/script/general/parser/last_cache.sim index 4b3285871b8e9414877a53aa205ba2e747e9d8e1..9c414263ecc65cc11327bbcfc7a79131984393b9 100644 --- a/tests/script/general/parser/last_cache.sim +++ b/tests/script/general/parser/last_cache.sim @@ -1,6 +1,6 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 -system 
sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/general/parser/lastrow.sim b/tests/script/general/parser/lastrow.sim index 2b8f294d5d058f4b7cc8c45380862180e35a5899..7cdd04e2ccdb93c7e1f84298101d74e7c3af061f 100644 --- a/tests/script/general/parser/lastrow.sim +++ b/tests/script/general/parser/lastrow.sim @@ -70,4 +70,21 @@ sleep 100 run general/parser/lastrow_query.sim -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +print =================== last_row + nested query +sql use $db +sql create table lr_nested(ts timestamp, f int) +sql insert into lr_nested values(now, 1) +sql insert into lr_nested values(now+1s, null) +sql select last_row(*) from (select * from lr_nested) +if $rows != 1 then + return -1 +endi +if $data01 != NULL then + return -1 +endi +sql select last_row(*) from (select f from lr_nested) +if $rows != 1 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/line_insert.sim b/tests/script/general/parser/line_insert.sim new file mode 100644 index 0000000000000000000000000000000000000000..85f2714ad3100766557797d2158d9d3e181b0f0b --- /dev/null +++ b/tests/script/general/parser/line_insert.sim @@ -0,0 +1,53 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/exec.sh -n dnode1 -s start +sleep 2000 +sql connect + +print =============== step1 +$db = testlp +$mte = ste +$mt = st +sql drop database $db -x step1 +step1: +sql create database $db precision 'us' +sql use $db +sql create stable $mte (ts timestamp, f int) TAGS(t1 bigint) + +line_insert st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns +line_insert st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64 1626006833640000000ns +line_insert ste,t2=5f64,t3=L"ste" c1=true,c2=4i64,c3="iam" 1626056811823316532ns +line_insert stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns +sql select * from st +if $rows != 2 then + return -1 +endi + +if $data00 != @21-07-11 20:33:53.639000@ then + return -1 +endi + +if $data02 != @passit@ then + return -1 +endi + +sql select * from stf +if $rows != 1 then + return -1 +endi + +sql select * from ste +if $rows != 1 then + return -1 +endi + +#print =============== clear +sql drop database $db +sql show databases +if $rows != 0 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim index 8249d9197f55998ae26cb6dd232b6a701bf0a32c..3c1ba0336973b8d07c785337de2d2c66202520c4 100644 --- a/tests/script/general/parser/nestquery.sim +++ b/tests/script/general/parser/nestquery.sim @@ -179,34 +179,168 @@ if $data21 != 49.500000000 then return -1 endi -#define TSDB_FUNC_APERCT 7 -#define TSDB_FUNC_LAST_ROW 10 -#define TSDB_FUNC_TWA 14 -#define TSDB_FUNC_LEASTSQR 15 -#define TSDB_FUNC_ARITHM 23 -#define TSDB_FUNC_DIFF 24 -#define TSDB_FUNC_INTERP 28 -#define TSDB_FUNC_RATE 29 -#define TSDB_FUNC_IRATE 30 -#define TSDB_FUNC_DERIVATIVE 32 - sql_error select stddev(c1) from (select c1 from nest_tb0); sql_error select percentile(c1, 20) from (select * from nest_tb0); +sql_error select interp(c1) from (select * from nest_tb0); +sql_error select derivative(val, 1s, 0) from (select 
c1 val from nest_tb0); +sql_error select twa(c1) from (select c1 from nest_tb0); +sql_error select irate(c1) from (select c1 from nest_tb0); +sql_error select diff(c1), twa(c1) from (select * from nest_tb0); +sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0); + +sql select apercentile(c1, 50) from (select * from nest_tb0) interval(1d) +if $rows != 7 then + return -1 +endi + +if $data00 != @20-09-15 00:00:00.000@ then + return -1 +endi + +if $data01 != 47.571428571 then + return -1 +endi + +if $data10 != @20-09-16 00:00:00.000@ then + return -1 +endi + +if $data11 != 49.666666667 then + return -1 +endi + +if $data20 != @20-09-17 00:00:00.000@ then + return -1 +endi + +if $data21 != 49.000000000 then + return -1 +endi + +if $data30 != @20-09-18 00:00:00.000@ then + return -1 +endi + +if $data31 != 48.333333333 then + return -1 +endi + +sql select twa(c1) from (select * from nest_tb0); +if $rows != 1 then + return -1 +endi + +if $data00 != 49.500000000 then + return -1 +endi + +sql select leastsquares(c1, 1, 1) from (select * from nest_tb0); +if $rows != 1 then + return -1 +endi + +if $data00 != @{slop:0.000100, intercept:49.000000}@ then + return -1 +endi + +sql select irate(c1) from (select * from nest_tb0); +if $data00 != 0.016666667 then + return -1 +endi + +sql select derivative(c1, 1s, 0) from (select * from nest_tb0); +if $rows != 9999 then + return -1 +endi + +if $data00 != @20-09-15 00:01:00.000@ then + return -1 +endi + +if $data01 != 0.016666667 then + return -1 +endi + +if $data10 != @20-09-15 00:02:00.000@ then + return -1 +endi + +if $data11 != 0.016666667 then + return -1 +endi + +sql select diff(c1) from (select * from nest_tb0); +if $rows != 9999 then + return -1 +endi sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d); +if $rows != 7 then + return -1 +endi -sql select top(x, 20) from (select c1 x from nest_tb0); +if $data00 != @20-09-15 00:00:00.000@ then + return -1 +endi -sql select bottom(x, 20) from (select c1 x from nest_tb0) +if $data01 != 48.666666667 then + print expect 48.666666667, actual: $data01 + return -1 +endi -print ===================> complex query +if $data02 != 70080.000000000 then + return -1 +endi +if $data03 != 99 then + return -1 +endi + +if $data04 != 0 then + return -1 +endi +if $data05 != 1440 then + return -1 +endi + +if $data06 != 0 then + print $data06 + return -1 +endi + +if $data07 != 1 then + return -1 +endi + +if $data08 != 99.000000000 then + print expect 99.000000000, actual: $data08 + return -1 +endi + +if $data10 != @20-09-16 00:00:00.000@ then + return -1 +endi + +if $data11 != 49.777777778 then + return -1 +endi + +if $data12 != 71680.000000000 then + return -1 +endi + +sql select top(x, 20) from (select c1 x from nest_tb0); + +sql select bottom(x, 20) from (select c1 x from nest_tb0) print ===================> group by + having +print =========================> ascending order/descending order + + print =========================> nest query join @@ -273,7 +407,6 @@ if $data03 != @20-09-15 00:00:00.000@ then return -1 endi -sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0); sql select diff(val) from (select c1 val from nest_tb0); if $rows != 9999 then return -1 @@ -287,4 +420,92 @@ if $data01 != 1 then return -1 endi +sql_error select last_row(*) from (select * from nest_tb0) having c1 > 0 + +print ===========>td-4805 +sql_error select tbname, i from (select * from nest_tb0) group by i; + +sql select 
count(*),c1 from (select * from nest_tb0) where c1 < 2 group by c1; +if $rows != 2 then + return -1 +endi + +if $data00 != 100 then + return -1 +endi + +if $data01 != 0 then + return -1 +endi + +if $data10 != 100 then + return -1 +endi + +if $data11 != 1 then + return -1 +endi + +print =====================>TD-5157 +sql select twa(c1) from nest_tb1 interval(19a); +if $rows != 10000 then + return -1 +endi + +if $data00 != @20-09-14 23:59:59.992@ then + return -1 +endi + +if $data01 != 0.000083333 then + return -1 +endi + +print ======================>TD-5271 +sql select min(val),max(val),first(val),last(val),count(val),sum(val),avg(val) from (select count(*) val from nest_mt0 group by tbname) +if $rows != 1 then + return -1 +endi + +if $data00 != 10000 then + return -1 +endi + +if $data01 != 10000 then + return -1 +endi + +if $data04 != 10 then + return -1 +endi + +if $data05 != 100000 then + return -1 +endi + +print =================>us database interval query, TD-5039 +sql create database test precision 'us'; +sql use test; +sql create table t1(ts timestamp, k int); +sql insert into t1 values('2020-01-01 01:01:01.000', 1) ('2020-01-01 01:02:00.000', 2); +sql select avg(k) from (select avg(k) k from t1 interval(1s)) interval(1m); +if $rows != 2 then + return -1 +endi + +if $data00 != @20-01-01 01:01:00.000000@ then + return -1 +endi + +if $data01 != 1.000000000 then + return -1 +endi + +if $data10 != @20-01-01 01:02:00.000000@ then + return -1 +endi + +if $data11 != 2.000000000 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index 45f6f5c49fbe48f6b881e4f03c3ed1af18a4b1a9..eb6cd75d2104f7ff61b5f5e5bccc12fdd239d3d5 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ b/tests/script/general/parser/select_with_tags.sim @@ -68,6 +68,27 @@ endw sleep 100 + +#======================= only check first table tag, TD-4827 +sql select count(*) from $mt where t1 in (0) +if $rows != 1 then + return -1 +endi +if $data00 != $rowNum then + return -1; +endi + +$secTag = ' . abc +$secTag = $secTag . 0 +$secTag = $secTag . 
' +sql select count(*) from $mt where t2 =$secTag and t1 in (0) +if $rows != 1 then + return -1 +endi +if $data00 != $rowNum then + return -1; +endi + #================================ sql select ts from select_tags_mt0 print $rows @@ -169,32 +190,32 @@ if $rows != 12800 then return -1 endi -sql select top(c1, 100), tbname, t1, t2 from select_tags_mt0; -if $rows != 100 then +sql select top(c1, 80), tbname, t1, t2 from select_tags_mt0; +if $rows != 80 then return -1 endi -if $data00 != @70-01-01 08:03:30.100@ then +if $data00 != @70-01-01 08:03:40.100@ then return -1 endi -if $data10 != @70-01-01 08:03:30.200@ then +if $data10 != @70-01-01 08:03:40.200@ then return -1 endi -if $data01 != 110 then +if $data01 != 111 then return -1 endi -if $data02 != @select_tags_tb11@ then +if $data02 != @select_tags_tb12@ then return -1 endi -if $data03 != 11 then +if $data03 != 12 then return -1 endi -if $data04 != @abc11@ then +if $data04 != @abc12@ then return -1 endi @@ -227,8 +248,8 @@ if $data04 != @abc12@ then return -1 endi -sql select bottom(c1, 100), tbname, t1, t2 from select_tags_mt0; -if $rows != 100 then +sql select bottom(c1, 72), tbname, t1, t2 from select_tags_mt0; +if $rows != 72 then return -1 endi diff --git a/tests/script/general/parser/single_row_in_tb_query.sim b/tests/script/general/parser/single_row_in_tb_query.sim index 1f9cb8b558c90323e18602005e275f067efeb345..acf85ea6922048e10ce8bd93c9eadb799649750f 100644 --- a/tests/script/general/parser/single_row_in_tb_query.sim +++ b/tests/script/general/parser/single_row_in_tb_query.sim @@ -193,3 +193,7 @@ endi if $data04 != 1 then return -1 endi + +print ===============>safty check TD-4927 +sql select first(ts, c1) from sr_stb where ts<1 group by t1; +sql select first(ts, c1) from sr_stb where ts>0 and ts<1; \ No newline at end of file diff --git a/tests/script/general/parser/subInfrom.sim b/tests/script/general/parser/subInfrom.sim deleted file mode 100644 index e47831ee8797e3a9a09ee933c7286740120623e6..0000000000000000000000000000000000000000 --- a/tests/script/general/parser/subInfrom.sim +++ /dev/null @@ -1,147 +0,0 @@ -system sh/stop_dnodes.sh - -system sh/deploy.sh -n dnode1 -i 1 -system sh/cfg.sh -n dnode1 -c walLevel -v 1 -system sh/exec.sh -n dnode1 -s start -sleep 100 -sql connect -sleep 100 - -print ========== sub_in_from.sim -$i = 0 - -$dbPrefix = subdb -$tbPrefix = sub_tb -$stbPrefix = sub_stb -$tbNum = 10 -$rowNum = 1000 -$totalNum = $tbNum * $rowNum -$loops = 200000 -$log = 10000 -$ts0 = 1537146000000 -$delta = 600000 -$i = 0 -$db = $dbPrefix . $i -$stb = $stbPrefix . $i - -sql drop database $db -x step1 -step1: -sql create database $db cache 16 maxrows 4096 keep 36500 -print ====== create tables -sql use $db -sql create table $stb (ts timestamp, c1 int, c2 bigint, c3 float, c4 double, c5 smallint, c6 tinyint, c7 bool, c8 binary(10), c9 nchar(10)) tags(t1 int) - -$i = 0 -$ts = $ts0 -$halfNum = $tbNum / 2 -while $i < $halfNum - $tbId = $i + $halfNum - $tb = $tbPrefix . $i - $tb1 = $tbPrefix . $tbId - sql create table $tb using $stb tags( $i ) - sql create table $tb1 using $stb tags( $tbId ) - - $x = 0 - while $x < $rowNum - $xs = $x * $delta - $ts = $ts0 + $xs - $c = $x / 10 - $c = $c * 10 - $c = $x - $c - $binary = 'binary . $c - $binary = $binary . ' - $nchar = 'nchar . $c - $nchar = $nchar . 
' - sql insert into $tb values ( $ts , $c , $c , $c , $c , $c , $c , true, $binary , $nchar ) - sql insert into $tb1 values ( $ts , $c , NULL , $c , NULL , $c , $c , true, $binary , $nchar ) - $x = $x + 1 - endw - - $i = $i + 1 -endw -print ====== tables created - -sql_error select count(*) from (select count(*) from abc.sub_stb0) -sql_error select val + 20 from (select count(*) from sub_stb0 interval(10h)) -sql_error select abc+20 from (select count(*) from sub_stb0 interval(1s)) - -sql select count(*) from (select count(*) from sub_stb0 interval(10h)) -if $rows != 1 then - return -1 -endi - -if $data00 != 18 then - print expect 18, actual: $data00 - return -1 -endi - -sql select ts from (select count(*) from sub_stb0 interval(10h)) -if $rows != 18 then - return -1 -endi - -if $data00 != @18-09-17 04:00:00.000@ then - return -1 -endi - -if $data01 != @18-09-17 14:00:00.000@ then - return -1 -endi - -sql select val + 20, val from (select count(*) as val from sub_stb0 interval(10h)) -if $rows != 18 then - return -1 -endi - -if $data00 != 320.000000 then - return -1 -endi - -if $data01 != 300 then - return -1 -endi - -if $data10 != 620 then - return -1 -endi - -if $data11 != 600 then - return -1 -endi - -if $data20 != 620 then - return -1 -endi - -if $data21 != 600 then - return -1 -endi - -sql select max(val), min(val), max(val) - min(val) from (select count(*) val from sub_stb0 interval(10h)) -if $rows != 1 then - return -1 -endi - -if $data00 != 600 then - return -1 -endi - -if $data01 != 100 then - return -1 -endi - -if $data02 != 500.000000 then - return -1 -endi - -sql select first(ts,val),last(ts,val) from (select count(*) val from sub_stb0 interval(10h)) -sql select top(val, 5) from (select count(*) val from sub_stb0 interval(10h)) -sql select diff(val) from (select count(*) val from sub_stb0 interval(10h)) -sql select apercentile(val, 50) from (select count(*) val from sub_stb0 interval(10h)) - -# not support yet -sql select percentile(val, 50) from (select count(*) val from sub_stb0 interval(10h)) -sql select stddev(val) from (select count(*) val from sub_stb0 interval(10h)) - -print ====================>complex query - diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 545e19edecba723685d1d5a0e2b4b1506c298b4b..fcd9d49fe555ae749da0fcd799e1866a8a05f656 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -39,7 +39,6 @@ run general/parser/slimit1.sim run general/parser/slimit_alter_tags.sim run general/parser/tbnameIn.sim run general/parser/join.sim -#run general/parser/join_multitables.sim run general/parser/join_multivnode.sim run general/parser/join_manyblocks.sim run general/parser/projection_limit_offset.sim @@ -61,6 +60,9 @@ run general/parser/slimit_alter_tags.sim run general/parser/binary_escapeCharacter.sim run general/parser/between_and.sim run general/parser/last_cache.sim +run general/parser/slimit_alter_tags.sim +run general/parser/udf.sim +run general/parser/udf_dll.sim +run general/parser/udf_dll_stable.sim run general/parser/nestquery.sim run general/parser/precision_ns.sim - diff --git a/tests/script/general/parser/top_groupby.sim b/tests/script/general/parser/top_groupby.sim new file mode 100644 index 0000000000000000000000000000000000000000..5709f4d1d7210761292d59aefa8984dad2fd2f23 --- /dev/null +++ b/tests/script/general/parser/top_groupby.sim @@ -0,0 +1,52 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c 
walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st2 (ts timestamp, f1 int, f2 float, f3 double, f4 bigint, f5 smallint, f6 tinyint, f7 bool, f8 binary(10), f9 nchar(10)) tags (id1 int, id2 float, id3 nchar(10), id4 double, id5 smallint, id6 bigint, id7 binary(10)) + +sql create table tb1 using st2 tags (1,1.0,"1",1.0,1,1,"1"); + +sql insert into tb1 values (now-200s,1,1.0,1.0,1,1,1,true,"1","1") +sql insert into tb1 values (now-100s,2,2.0,2.0,2,2,2,true,"2","2") +sql insert into tb1 values (now,3,3.0,3.0,3,3,3,true,"3","3") +sql insert into tb1 values (now+100s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+200s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+300s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+400s,4,4.0,4.0,4,4,4,true,"4","4") +sql insert into tb1 values (now+500s,4,4.0,4.0,4,4,4,true,"4","4") + +sql select top(f1, 2) from tb1 group by f1; + +if $rows != 5 then + return -1 +endi + +sql select bottom(f1, 2) from tb1 group by f1; + +if $rows != 5 then + return -1 +endi + +sql select top(f1, 100) from tb1 group by f1; + +if $rows != 8 then + return -1 +endi + +sql select bottom(f1, 100) from tb1 group by f1; + +if $rows != 8 then + return -1 +endi + diff --git a/tests/script/general/parser/udf.sim b/tests/script/general/parser/udf.sim new file mode 100644 index 0000000000000000000000000000000000000000..c048a7c37a82b8591f60c209cf49cc40bdfd10f1 --- /dev/null +++ b/tests/script/general/parser/udf.sim @@ -0,0 +1,637 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start +system sh/prepare_udf.sh + +sleep 100 +sql connect +print ======================== dnode1 start + +sql create function n01 as '/tmp/normal' outputtype int; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n01 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != INT then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n01; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function n02 as '/tmp/normal' outputtype bool; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n02 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != BOOL then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n02; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + +sql create function n03 as '/tmp/normal' outputtype TINYINT; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n03 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != TINYINT then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n03; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + + +sql create function n04 as '/tmp/normal' outputtype SMALLINT; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n04 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + 
return -1 +endi +if $data03 != SMALLINT then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n04; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + + + +sql create function n05 as '/tmp/normal' outputtype INT; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n05 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != INT then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n05; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + + + + + + + +sql create function n06 as '/tmp/normal' outputtype BIGINT; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n06 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != BIGINT then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n06; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + + + + + +sql create function n07 as '/tmp/normal' outputtype FLOAT; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n07 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != FLOAT then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n07; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + + + + +sql create function n08 as '/tmp/normal' outputtype DOUBLE; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n08 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != DOUBLE then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n08; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + + + + +sql create function n09 as '/tmp/normal' outputtype BINARY; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n09 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != BINARY(0) then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n09; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function n10 as '/tmp/normal' outputtype BINARY(10); +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n10 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != BINARY(10) then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n10; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function n11 as '/tmp/normal' outputtype TIMESTAMP; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n11 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != TIMESTAMP then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n11; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function n12 as '/tmp/normal' outputtype NCHAR; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n12 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi 
+if $data03 != NCHAR(0) then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n12; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function n13 as '/tmp/normal' outputtype NCHAR(10); +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n13 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data03 != NCHAR(10) then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n13; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + + +sql create function n14 as '/tmp/normal' outputtype TINYINT UNSIGNED; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n14 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n14; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + +sql create function n15 as '/tmp/normal' outputtype SMALLINT UNSIGNED; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n15 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n15; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function n16 as '/tmp/normal' outputtype INT UNSIGNED; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n16 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n16; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + + +sql create function n17 as '/tmp/normal' outputtype BIGINT UNSIGNED; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n17 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 0 then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n17; + +sql show functions; +if $rows != 0 then + return -1 +endi + + +sql create aggregate function n18 as '/tmp/normal' outputtype BIGINT UNSIGNED; +sql show functions; +if $rows != 1 then + return -1 +endi + + +if $data00 != n18 then + return -1 +endi +if $data01 != /tmp/normal then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data05 != 5 then + return -1 +endi + +sql drop function n18; + +sql show functions; +if $rows != 0 then + return -1 +endi + + + +sql create function t01 as '/tmp/normal' outputtype INT; +sql_error create function t01 as '/tmp/normal' outputtype SMALLINT; +sql drop function t01; +sql create function t01 as '/tmp/normal' outputtype INT; +sql create function t02 as '/tmp/normal' outputtype SMALLINT; +sql show functions; +if $rows != 2 then + return -1 +endi + + + + + + +sql_error create function e1 as '/tmp/normal'; +sql_error create function e2 as '/tmp/normal' outputtype; +sql_error create function e3 as '/tmp/normal' a; +sql_error create function e4 as '/tmp/normal' outputtype a; +sql_error create function e5 as '/tmp/normal' outputtype bool int; +sql_error create function as '/tmp/normal' outputtype; +sql_error create function e6 as '/tmp/empty' outputtype int; +sql_error create function e7 as '/tmp/big' outputtype int; +sql_error create function e8 as '/tmp/noexistfile' outputtype int; +sql_error create function 
e0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456 as '/tmp/normal' outputtype int; +sql_error create function e9 as outputtype int; + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/udf_dll.sim b/tests/script/general/parser/udf_dll.sim new file mode 100644 index 0000000000000000000000000000000000000000..0f9436762adb645785ddcf9a4abaf4a5be810a34 --- /dev/null +++ b/tests/script/general/parser/udf_dll.sim @@ -0,0 +1,494 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start +system sh/prepare_udf.sh + +sleep 100 +sql connect +print ======================== dnode1 start + +sql create function add_one as '/tmp/add_one.so' outputtype int; +sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype int; +sql show functions; +if $rows != 2 then + return -1 +endi + +sql create database db; +sql use db; +sql create table tb1 (ts timestamp, f1 int, f2 bool, f3 binary(10)); +sql insert into tb1 values ('2021-03-23 17:17:19.660', 1, true, 'tb1-1'); +sql insert into tb1 values ('2021-03-23 19:23:28.595', 2, false, 'tb1-2'); +sql insert into tb1 values ('2021-03-23 19:33:39.070', 3, true, 'tb1-3'); +sql insert into tb1 values ('2021-03-23 19:34:37.670', 4, false, 'tb1-4'); +sql insert into tb1 values ('2021-03-24 19:08:06.609', 5, true, 'tb1-5'); +sql insert into tb1 values ('2021-03-24 19:26:38.231', 6, false, 'tb1-6'); +sql insert into tb1 values ('2021-03-25 10:03:17.688', 7, true, 'tb1-7'); + +sql select add_one(f1) from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data30 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data50 != 7 then + return -1 +endi +if $data60 != 8 then + return -1 +endi + + +sql select sum_double(f1) from tb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 56 then + return -1 +endi + + +sql select ts,add_one(f1),f1 from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.595@ then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data12 != 2 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data22 != 3 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data32 != 4 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.609@ then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data42 != 5 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.231@ then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data52 != 6 then + return -1 +endi +if $data60 != @21-03-25 10:03:17.688@ then + return -1 +endi +if $data61 != 8 then + return -1 +endi +if $data62 != 7 then + return -1 +endi + + + + +sql select add_one(f1),add_one(f1) from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data21 != 4 
then + return -1 +endi +if $data30 != 5 then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 != 7 then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data60 != 8 then + return -1 +endi +if $data61 != 8 then + return -1 +endi + + +sql select add_one(f1)+1 from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 3.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data20 != 5.000000000 then + return -1 +endi +if $data30 != 6.000000000 then + return -1 +endi +if $data40 != 7.000000000 then + return -1 +endi +if $data50 != 8.000000000 then + return -1 +endi +if $data60 != 9.000000000 then + return -1 +endi + + +sql select sum_double(f1)+1 from tb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 57.000000000 then + return -1 +endi + + +sql select add_one(f1)+1,f1 from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 5.000000000 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 6.000000000 then + return -1 +endi +if $data31 != 4 then + return -1 +endi +if $data40 != 7.000000000 then + return -1 +endi +if $data41 != 5 then + return -1 +endi +if $data50 != 8.000000000 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data60 != 9.000000000 then + return -1 +endi +if $data61 != 7 then + return -1 +endi + + +sql select sum_double(f1) from tb1 interval (10a); +if $rows != 7 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.590@ then + return -1 +endi +if $data11 != 4 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 6 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 8 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.600@ then + return -1 +endi +if $data41 != 10 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.230@ then + return -1 +endi +if $data51 != 12 then + return -1 +endi +if $data60 != @21-03-25 10:03:17.680@ then + return -1 +endi +if $data61 != 14 then + return -1 +endi + +sql select ts,add_one(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000"; +if $rows != 6 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.595@ then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.609@ then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.231@ then + return -1 +endi +if $data51 != 7 then + return -1 +endi + +sql select sum_double(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h); +if $rows != 3 then + return -1 +endi + +if $data00 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data11 != 18 then + return 
-1 +endi +if $data20 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data21 != 22 then + return -1 +endi + + +sql select sum_double(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h) fill(value,999); +if $rows != 28 then + return -1 +endi + +sql_error select add_one(f1) from tb1 group by f1; + +sql select sum_double(f1) from tb1 group by f1; +if $rows != 7 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 4 then + return -1 +endi +if $data20 != 6 then + return -1 +endi +if $data30 != 8 then + return -1 +endi +if $data40 != 10 then + return -1 +endi +if $data50 != 12 then + return -1 +endi +if $data60 != 14 then + return -1 +endi + +sql select sum_double(f1) from tb1 interval (1h) order by ts desc; +if $rows != 4 then + return -1 +endi + +if $data00 != @21-03-25 10:00:00.000@ then + return -1 +endi +if $data01 != 14 then + return -1 +endi +if $data10 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data11 != 22 then + return -1 +endi +if $data20 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data21 != 18 then + return -1 +endi +if $data30 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data31 != 2 then + return -1 +endi + + +sql select add_one(f1) from tb1 limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi + + +sql select sum_double(f1) from tb1 interval (1d) limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != @21-03-23 00:00:00.000@ then + return -1 +endi +if $data01 != 20 then + return -1 +endi +if $data10 != @21-03-24 00:00:00.000@ then + return -1 +endi +if $data11 != 22 then + return -1 +endi + + +sql_error select ts,sum_double(f1),f1 from tb1; +sql_error select add_one(f1),count(f1) from tb1; +sql_error select sum_double(f1),count(f1) from tb1; +sql_error select add_one(f1),top(f1,3) from tb1; +sql_error select add_one(f1) from tb1 interval(10a); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/udf_dll_stable.sim b/tests/script/general/parser/udf_dll_stable.sim new file mode 100644 index 0000000000000000000000000000000000000000..b8da57467e912ff27f4fbda7226c75e089f04808 --- /dev/null +++ b/tests/script/general/parser/udf_dll_stable.sim @@ -0,0 +1,1163 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 1 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1 +system sh/exec.sh -n dnode1 -s start +system sh/prepare_udf.sh + +sleep 100 +sql connect +print ======================== dnode1 start + +sql create function add_one as '/tmp/add_one.so' outputtype int; +sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype int; +sql show functions; +if $rows != 2 then + return -1 +endi + +sql create database db; +sql use db; +sql create stable stb1 (ts timestamp, f1 int, f2 bool, f3 binary(10)) tags(id1 int); +sql create table tb1 using stb1 tags(1); +sql insert into tb1 values ('2021-03-23 17:17:19.660', 1, true, 'tb1-1'); +sql insert into tb1 values ('2021-03-23 19:23:28.595', 2, false, 'tb1-2'); +sql insert into tb1 values ('2021-03-23 19:33:39.070', 3, true, 'tb1-3'); +sql insert into tb1 values ('2021-03-23 19:34:37.670', 4, false, 'tb1-4'); +sql insert into tb1 values ('2021-03-24 19:08:06.609', 5, true, 'tb1-5'); +sql insert into tb1 values ('2021-03-24 19:26:38.231', 6, false, 'tb1-6'); +sql insert into tb1 values ('2021-03-25 10:03:17.688', 7, true, 'tb1-7'); +sql create table tb2 
using stb1 tags(2); +sql create table tb3 using stb1 tags(3); +sql create table tb4 using stb1 tags(4); +sql create table tb5 using stb1 tags(5); +sql create table tb6 using stb1 tags(6); +sql create table tb7 using stb1 tags(7); +sql create table tb8 using stb1 tags(8); +sql create table tb9 using stb1 tags(9); +sql insert into tb2 values ('2021-03-03 17:17:19.660', 1, true, 'tb2-1'); +sql insert into tb2 values ('2021-03-13 19:23:28.595', 2, false, 'tb2-2'); +sql insert into tb3 values ('2021-03-23 19:33:39.070', 3, true, 'tb3-1'); +sql insert into tb3 values ('2021-03-24 19:34:37.670', 4, false, 'tb3-2'); +sql insert into tb5 values ('2021-03-25 19:08:06.609', 1, true, 'tb5-1'); +sql insert into tb5 values ('2021-04-01 19:26:38.231', 2, false, 'tb5-2'); +sql insert into tb5 values ('2021-04-08 10:03:17.688', 3, true, 'tb5-3'); +sql insert into tb6 values ('2021-04-08 11:03:17.688', 1, true, 'tb6-1'); +sql insert into tb6 values ('2021-04-08 12:03:17.688', 2, true, 'tb6-2'); +sql insert into tb9 values ('2021-04-08 16:03:17.688', 4, true, 'tb9-1'); + +sql select add_one(f1) from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data30 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data50 != 7 then + return -1 +endi +if $data60 != 8 then + return -1 +endi + + +sql select sum_double(f1) from tb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 56 then + return -1 +endi + + +sql select ts,add_one(f1),f1 from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.595@ then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data12 != 2 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data22 != 3 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data32 != 4 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.609@ then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data42 != 5 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.231@ then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data52 != 6 then + return -1 +endi +if $data60 != @21-03-25 10:03:17.688@ then + return -1 +endi +if $data61 != 8 then + return -1 +endi +if $data62 != 7 then + return -1 +endi + + + + +sql select add_one(f1),add_one(f1) from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data30 != 5 then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 != 7 then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data60 != 8 then + return -1 +endi +if $data61 != 8 then + return -1 +endi + + +sql select add_one(f1)+1 from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 3.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data20 != 5.000000000 then + return -1 +endi +if $data30 != 6.000000000 then + return -1 
+endi +if $data40 != 7.000000000 then + return -1 +endi +if $data50 != 8.000000000 then + return -1 +endi +if $data60 != 9.000000000 then + return -1 +endi + + +sql select sum_double(f1)+1 from tb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 57.000000000 then + return -1 +endi + + +sql select add_one(f1)+1,f1 from tb1; +if $rows != 7 then + return -1 +endi + +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 5.000000000 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 6.000000000 then + return -1 +endi +if $data31 != 4 then + return -1 +endi +if $data40 != 7.000000000 then + return -1 +endi +if $data41 != 5 then + return -1 +endi +if $data50 != 8.000000000 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data60 != 9.000000000 then + return -1 +endi +if $data61 != 7 then + return -1 +endi + + +sql select sum_double(f1) from tb1 interval (10a); +if $rows != 7 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.590@ then + return -1 +endi +if $data11 != 4 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 6 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 8 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.600@ then + return -1 +endi +if $data41 != 10 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.230@ then + return -1 +endi +if $data51 != 12 then + return -1 +endi +if $data60 != @21-03-25 10:03:17.680@ then + return -1 +endi +if $data61 != 14 then + return -1 +endi + +sql select ts,add_one(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000"; +if $rows != 6 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.595@ then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.609@ then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.231@ then + return -1 +endi +if $data51 != 7 then + return -1 +endi + +sql select sum_double(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h); +if $rows != 3 then + return -1 +endi + +if $data00 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data11 != 18 then + return -1 +endi +if $data20 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data21 != 22 then + return -1 +endi + + +sql select sum_double(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h) fill(value,999); +if $rows != 28 then + return -1 +endi + +sql_error select add_one(f1) from tb1 group by f1; + +sql select sum_double(f1) from tb1 group by f1; +if $rows != 7 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 4 then + return -1 +endi +if $data20 != 6 then + return -1 +endi +if $data30 != 8 then + return -1 +endi +if $data40 != 10 
then + return -1 +endi +if $data50 != 12 then + return -1 +endi +if $data60 != 14 then + return -1 +endi + +sql select sum_double(f1) from tb1 interval (1h) order by ts desc; +if $rows != 4 then + return -1 +endi + +if $data00 != @21-03-25 10:00:00.000@ then + return -1 +endi +if $data01 != 14 then + return -1 +endi +if $data10 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data11 != 22 then + return -1 +endi +if $data20 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data21 != 18 then + return -1 +endi +if $data30 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data31 != 2 then + return -1 +endi + + +sql select add_one(f1) from tb1 limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi + + +sql select sum_double(f1) from tb1 interval (1d) limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != @21-03-23 00:00:00.000@ then + return -1 +endi +if $data01 != 20 then + return -1 +endi +if $data10 != @21-03-24 00:00:00.000@ then + return -1 +endi +if $data11 != 22 then + return -1 +endi + + +sql_error select ts,sum_double(f1),f1 from tb1; +sql_error select add_one(f1),count(f1) from tb1; +sql_error select sum_double(f1),count(f1) from tb1; +sql_error select add_one(f1),top(f1,3) from tb1; +sql_error select add_one(f1) from tb1 interval(10a); + + +sql select add_one(f1) from stb1; +if $rows != 17 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data30 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data50 != 7 then + return -1 +endi +if $data60 != 8 then + return -1 +endi +if $data70 != 2 then + return -1 +endi +if $data80 != 3 then + return -1 +endi +if $data90 != 4 then + return -1 +endi + + +sql select sum_double(f1) from stb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 102 then + return -1 +endi + + +sql select ts,add_one(f1),f1 from stb1; +if $rows != 17 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data02 != 1 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.595@ then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data12 != 2 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data22 != 3 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data32 != 4 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.609@ then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data42 != 5 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.231@ then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data52 != 6 then + return -1 +endi +if $data60 != @21-03-25 10:03:17.688@ then + return -1 +endi +if $data61 != 8 then + return -1 +endi +if $data62 != 7 then + return -1 +endi + + + + +sql select add_one(f1),add_one(f1) from stb1; +if $rows != 17 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data20 != 4 then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data30 != 5 then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 
!= 7 then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data60 != 8 then + return -1 +endi +if $data61 != 8 then + return -1 +endi + +sql select add_one(f1)+1 from stb1; +if $rows != 17 then + return -1 +endi + +if $data00 != 3.000000000 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data20 != 5.000000000 then + return -1 +endi +if $data30 != 6.000000000 then + return -1 +endi +if $data40 != 7.000000000 then + return -1 +endi +if $data50 != 8.000000000 then + return -1 +endi +if $data60 != 9.000000000 then + return -1 +endi + + +sql select sum_double(f1)+1 from stb1; +if $rows != 1 then + return -1 +endi + +if $data00 != 103.000000000 then + return -1 +endi + + +sql select add_one(f1)+1,f1 from stb1; +if $rows != 17 then + return -1 +endi + +if $data00 != 3.000000000 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 4.000000000 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 5.000000000 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 6.000000000 then + return -1 +endi +if $data31 != 4 then + return -1 +endi +if $data40 != 7.000000000 then + return -1 +endi +if $data41 != 5 then + return -1 +endi +if $data50 != 8.000000000 then + return -1 +endi +if $data51 != 6 then + return -1 +endi +if $data60 != 9.000000000 then + return -1 +endi +if $data61 != 7 then + return -1 +endi + + +sql select sum_double(f1) from stb1 interval (10a); +if $rows != 16 then + return -1 +endi + +if $data00 != @21-03-03 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-13 19:23:28.590@ then + return -1 +endi +if $data11 != 4 then + return -1 +endi +if $data20 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data30 != @21-03-23 19:23:28.590@ then + return -1 +endi +if $data31 != 4 then + return -1 +endi +if $data40 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data41 != 12 then + return -1 +endi +if $data50 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data51 != 8 then + return -1 +endi +if $data60 != @21-03-24 19:08:06.600@ then + return -1 +endi +if $data61 != 10 then + return -1 +endi +if $data70 != @21-03-24 19:26:38.230@ then + return -1 +endi +if $data71 != 12 then + return -1 +endi +if $data80 != @21-03-24 19:34:37.670@ then + return -1 +endi +if $data81 != 8 then + return -1 +endi +if $data90 != @21-03-25 10:03:17.680@ then + return -1 +endi +if $data91 != 14 then + return -1 +endi + + +sql select ts,add_one(f1) from stb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000"; +if $rows != 8 then + return -1 +endi + +if $data00 != @21-03-23 17:17:19.660@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:23:28.595@ then + return -1 +endi +if $data11 != 3 then + return -1 +endi +if $data20 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data30 != @21-03-23 19:34:37.670@ then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != @21-03-24 19:08:06.609@ then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 != @21-03-24 19:26:38.231@ then + return -1 +endi +if $data51 != 7 then + return -1 +endi +if $data60 != @21-03-23 19:33:39.070@ then + return -1 +endi +if $data61 != 4 then + return -1 +endi +if $data70 != @21-03-24 19:34:37.670@ then + return -1 +endi +if $data71 != 5 then + return -1 +endi + +sql select sum_double(f1) from 
stb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h); +if $rows != 3 then + return -1 +endi + +if $data00 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data11 != 24 then + return -1 +endi +if $data20 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data21 != 30 then + return -1 +endi + + +sql select sum_double(f1) from stb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h) fill(value,999); +if $rows != 28 then + return -1 +endi +if $data00 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 18:00:00.000@ then + return -1 +endi +if $data11 != 999 then + return -1 +endi +if $data20 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data21 != 24 then + return -1 +endi + +sql_error select add_one(f1) from stb1 group by f1; + +sql select sum_double(f1) from stb1 group by f1; +if $rows != 7 then + return -1 +endi + +if $data00 != 8 then + return -1 +endi +if $data10 != 16 then + return -1 +endi +if $data20 != 18 then + return -1 +endi +if $data30 != 24 then + return -1 +endi +if $data40 != 10 then + return -1 +endi +if $data50 != 12 then + return -1 +endi +if $data60 != 14 then + return -1 +endi + +sql select sum_double(f1) from stb1 interval (1h) order by ts desc; +if $rows != 12 then + return -1 +endi + +if $data00 != @21-04-08 16:00:00.000@ then + return -1 +endi +if $data01 != 8 then + return -1 +endi +if $data10 != @21-04-08 12:00:00.000@ then + return -1 +endi +if $data11 != 4 then + return -1 +endi +if $data20 != @21-04-08 11:00:00.000@ then + return -1 +endi +if $data21 != 2 then + return -1 +endi +if $data30 != @21-04-08 10:00:00.000@ then + return -1 +endi +if $data31 != 6 then + return -1 +endi +if $data40 != @21-04-01 19:00:00.000@ then + return -1 +endi +if $data41 != 4 then + return -1 +endi +if $data50 != @21-03-25 19:00:00.000@ then + return -1 +endi +if $data51 != 2 then + return -1 +endi +if $data60 != @21-03-25 10:00:00.000@ then + return -1 +endi +if $data61 != 14 then + return -1 +endi +if $data70 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data71 != 30 then + return -1 +endi +if $data80 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data81 != 24 then + return -1 +endi +if $data90 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data91 != 2 then + return -1 +endi + + +sql select add_one(f1) from stb1 limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi +if $data10 != 3 then + return -1 +endi + + +sql select sum_double(f1) from stb1 interval (1d) limit 2; +if $rows != 2 then + return -1 +endi + +if $data00 != @21-03-03 00:00:00.000@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-13 00:00:00.000@ then + return -1 +endi +if $data11 != 4 then + return -1 +endi + +sql select sum_double(f1) from stb1 group by id1; + +if $rows != 6 then + return -1 +endi + +if $data00 != 56 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data10 != 6 then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != 14 then + return -1 +endi +if $data21 != 3 then + return -1 +endi +if $data30 != 12 then + return -1 +endi +if $data31 != 5 then + return -1 +endi +if $data40 != 6 then + return -1 +endi +if $data41 != 6 then + return -1 +endi +if $data50 != 8 then + return -1 +endi +if $data51 != 9 then + return -1 +endi + + 
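+# Note: sum_double is an aggregate UDF whose finalize step doubles the accumulated
+# sum, so the expected values above are 2 * sum(f1) for the rows in scope
+# (e.g. tb1 holds f1 = 1..7, sum 28, result 56; the group by id1 results are
+# 2 * sum(f1) per child table). The next query uses a 1h window sliding every
+# 30m, so each row contributes to two overlapping windows.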
+sql select sum_double(f1) from tb1 where ts>="2021-03-23 17:00:00.000" and ts<="2021-03-24 20:00:00.000" interval (1h) sliding (30m); +if $rows != 7 then + return -1 +endi + +if $data00 != @21-03-23 16:30:00.000@ then + return -1 +endi +if $data01 != 2 then + return -1 +endi +if $data10 != @21-03-23 17:00:00.000@ then + return -1 +endi +if $data11 != 2 then + return -1 +endi +if $data20 != @21-03-23 18:30:00.000@ then + return -1 +endi +if $data21 != 4 then + return -1 +endi +if $data30 != @21-03-23 19:00:00.000@ then + return -1 +endi +if $data31 != 18 then + return -1 +endi +if $data40 != @21-03-23 19:30:00.000@ then + return -1 +endi +if $data41 != 14 then + return -1 +endi +if $data50 != @21-03-24 18:30:00.000@ then + return -1 +endi +if $data51 != 22 then + return -1 +endi +if $data60 != @21-03-24 19:00:00.000@ then + return -1 +endi +if $data61 != 22 then + return -1 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + + + + + + diff --git a/tests/script/general/parser/where.sim b/tests/script/general/parser/where.sim index 6dfea3d2e70e75f82fd7bd48cd5578971e158de4..781b2725b5ff8d55e9cb1dd2f5de0d4a77275725 100644 --- a/tests/script/general/parser/where.sim +++ b/tests/script/general/parser/where.sim @@ -139,24 +139,24 @@ sql_error select * from $mt where c1 like 1 sql create table wh_mt1 (ts timestamp, c1 smallint, c2 int, c3 bigint, c4 float, c5 double, c6 tinyint, c7 binary(10), c8 nchar(10), c9 bool, c10 timestamp) tags (t1 binary(10), t2 smallint, t3 int, t4 bigint, t5 float, t6 double) sql create table wh_mt1_tb1 using wh_mt1 tags ('tb11', 1, 1, 1, 1, 1) sql insert into wh_mt1_tb1 values (now, 1, 1, 1, 1, 1, 1, 'binary', 'nchar', true, '2019-01-01 00:00:00.000') -sql_error select last(*) from wh_mt1 where c1 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c1 in ('1') -sql_error select last(*) from wh_mt1 where c2 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c2 in ('1') -sql_error select last(*) from wh_mt1 where c3 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c3 in ('1') -sql_error select last(*) from wh_mt1 where c4 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c4 in ('1') -sql_error select last(*) from wh_mt1 where c5 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c5 in ('1') -sql_error select last(*) from wh_mt1 where c6 in ('1') -sql_error select last(*) from wh_mt1_tb1 where c6 in ('1') +#sql_error select last(*) from wh_mt1 where c1 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c1 in ('1') +#sql_error select last(*) from wh_mt1 where c2 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c2 in ('1') +#sql_error select last(*) from wh_mt1 where c3 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c3 in ('1') +#sql_error select last(*) from wh_mt1 where c4 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c4 in ('1') +#sql_error select last(*) from wh_mt1 where c5 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c5 in ('1') +#sql_error select last(*) from wh_mt1 where c6 in ('1') +#sql_error select last(*) from wh_mt1_tb1 where c6 in ('1') #sql_error select last(*) from wh_mt1 where c7 in ('binary') #sql_error select last(*) from wh_mt1_tb1 where c7 in ('binary') #sql_error select last(*) from wh_mt1 where c8 in ('nchar') #sql_error select last(*) from wh_mt1_tb1 where c9 in (true, false) -sql_error select last(*) from wh_mt1 where c10 in ('2019-01-01 00:00:00.000') -sql_error select last(*) from wh_mt1_tb1 where c10 in ('2019-01-01 00:00:00.000') +#sql_error select last(*) 
from wh_mt1 where c10 in ('2019-01-01 00:00:00.000') +#sql_error select last(*) from wh_mt1_tb1 where c10 in ('2019-01-01 00:00:00.000') sql select last(*) from wh_mt1 where c1 = 1 if $rows != 1 then return -1 @@ -352,5 +352,24 @@ if $rows != 0 then return -1 endi - +print ==========================> td-4783,td-4792 +sql create table where_ts(ts timestamp, f int) +sql insert into where_ts values('2021-06-19 16:22:00', 1); +sql insert into where_ts values('2021-06-19 16:23:00', 2); +sql insert into where_ts values('2021-06-19 16:24:00', 3); +sql insert into where_ts values('2021-06-19 16:25:00', 1); +sql select * from (select * from where_ts) where ts<'2021-06-19 16:25:00' and ts>'2021-06-19 16:22:00' +if $rows != 2 then + return -1 +endi +print $data00, $data01 +if $data01 != 2 then + return -1 +endi +sql insert into where_ts values(now, 5); +sleep 10 +sql select * from (select * from where_ts) where ts +#include +#include + +typedef struct SUdfInit{ + int maybe_null; /* 1 if function can return NULL */ + int decimals; /* for real functions */ + long long length; /* For string functions */ + char *ptr; /* free pointer for function data */ + int const_item; /* 0 if result is independent of arguments */ +} SUdfInit; + + +#define TSDB_DATA_INT_NULL 0x80000000L +#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L + +void abs_max(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, + int* numOfOutput, short otype, short obytes, SUdfInit* buf) { + int i; + int r = 0; + printf("abs_max input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); + if (itype == 5) { + r=*(long *)dataOutput; + *numOfOutput=0; + + for(i=0;i r) { + r = v; + } + } + + *(long *)dataOutput=r; + + printf("abs_max out, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput); + } +} + + + +void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) { + int i; + int r = 0; + printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf); + *numOfOutput=1; + printf("abs_max finalize, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput); +} + +void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) { + int r = 0; + + if (numOfRows > 0) { + r = *((long *)data); + } + printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf); + for (int i = 1; i < numOfRows; ++i) { + printf("abs_max_merge %d - %ld\n", i, *((long *)data + i)); + if (*((long*)data + i) > r) { + r= *((long*)data + i); + } + } + + *(long*)dataOutput=r; + if (numOfRows > 0) { + *numOfOutput=1; + } else { + *numOfOutput=0; + } + + printf("abs_max_merge, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput); +} + + +int abs_max_init(SUdfInit* buf) { + printf("abs_max init\n"); + return 0; +} + + +void abs_max_destroy(SUdfInit* buf) { + printf("abs_max destroy\n"); +} + diff --git a/tests/script/sh/add_one.c b/tests/script/sh/add_one.c new file mode 100644 index 0000000000000000000000000000000000000000..e12cf8f26f6ddad67f9f7b091c033de46a3f6f50 --- /dev/null +++ b/tests/script/sh/add_one.c @@ -0,0 +1,33 @@ +#include +#include +#include + +typedef struct SUdfInit{ + int maybe_null; /* 1 if function can return NULL */ + int decimals; /* for real functions */ + long long length; /* For string 
functions */ + char *ptr; /* free pointer for function data */ + int const_item; /* 0 if result is independent of arguments */ +} SUdfInit; + +void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput, + int* numOfOutput, short otype, short obytes, SUdfInit* buf) { + int i; + int r = 0; + printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); + if (itype == 4) { + for(i=0;i> $TAOS_CFG +echo " " >> $TAOS_CFG echo "masterIp $MASTER_IP" >> $TAOS_CFG echo "dataDir $DATA_DIR" >> $TAOS_CFG echo "logDir $LOG_DIR" >> $TAOS_CFG diff --git a/tests/script/sh/demo.c b/tests/script/sh/demo.c new file mode 100644 index 0000000000000000000000000000000000000000..23d217444892e5bd8dc1c83b569dc22616c42e78 --- /dev/null +++ b/tests/script/sh/demo.c @@ -0,0 +1,112 @@ +#include +#include +#include + +typedef struct SUdfInit{ + int maybe_null; /* 1 if function can return NULL */ + int decimals; /* for real functions */ + long long length; /* For string functions */ + char *ptr; /* free pointer for function data */ + int const_item; /* 0 if result is independent of arguments */ +} SUdfInit; + +typedef struct SDemo{ + double sum; + int num; + short otype; +}SDemo; + +#define FLOAT_NULL 0x7FF00000 // it is an NAN +#define DOUBLE_NULL 0x7FFFFF0000000000L // it is an NAN + + +void demo(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, + int* numOfOutput, short otype, short obytes, SUdfInit* buf) { + int i; + double r = 0; + SDemo *p = (SDemo *)interBuf; + SDemo *q = (SDemo *)dataOutput; + printf("demo input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, interBUf:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, interBuf, tsOutput, numOfOutput, buf); + + for(i=0;isum += r*r; + } + + p->otype = otype; + p->num += numOfRows; + + q->sum = p->sum; + q->num = p->num; + q->otype = p->otype; + + *numOfOutput=1; + + printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput); +} + + +void demo_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) { + int i; + SDemo *p = (SDemo *)data; + SDemo res = {0}; + printf("demo_merge input data:%p, rows:%d, dataoutput:%p, numOfOutput:%p, buf:%p\n", data, numOfRows, dataOutput, numOfOutput, buf); + + for(i=0;isum * p->sum; + res.num += p->num; + p++; + } + + p->sum = res.sum; + p->num = res.num; + + *numOfOutput=1; + + printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput); +} + + + +void demo_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) { + SDemo *p = (SDemo *)interBuf; + printf("demo_finalize interbuf:%p, numOfOutput:%p, buf:%p, sum:%f, num:%d\n", interBuf, numOfOutput, buf, p->sum, p->num); + if (p->otype == 6) { + if (p->num != 30000) { + *(unsigned int *)dataOutput = FLOAT_NULL; + } else { + *(float *)dataOutput = (float)(p->sum / p->num); + } + printf("finalize values:%f\n", *(float *)dataOutput); + } else if (p->otype == 7) { + if (p->num != 30000) { + *(unsigned long long *)dataOutput = DOUBLE_NULL; + } else { + *(double *)dataOutput = (double)(p->sum / p->num); + } + printf("finalize values:%f\n", *(double *)dataOutput); + } + + *numOfOutput=1; + + printf("demo finalize, numOfOutput:%d\n", *numOfOutput); +} + + +int demo_init(SUdfInit* buf) { 
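+  /* Init hook for the demo UDF: nothing needs to be allocated here. The
+   * partial state (sum of squares and row count, SDemo) lives in interBuf:
+   * demo() accumulates it per data block, demo_merge() combines the partial
+   * results, and demo_finalize() emits sum/num as float (otype 6) or double
+   * (otype 7), writing the NULL bit pattern when fewer than 30000 rows were seen. */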
+ printf("demo init\n"); + return 0; +} + + +void demo_destroy(SUdfInit* buf) { + printf("demo destroy\n"); +} + diff --git a/tests/script/sh/demo.lua b/tests/script/sh/demo.lua new file mode 100644 index 0000000000000000000000000000000000000000..c5e5582fc30b58db30a5b18faa4ccfd0a5f656d0 --- /dev/null +++ b/tests/script/sh/demo.lua @@ -0,0 +1,43 @@ +funcName = "test" + +global = {} + +function test_init() + return global +end + +function test_add(rows, ans, key) + t = {} + t["sum"] = 0.0 + t["num"] = 0 + for i=1, #rows do + t["sum"] = t["sum"] + rows[i] * rows[i] + end + t["num"] = #rows + + + if (ans[key] ~= nil) + then + ans[key]["sum"] = ans[key]["sum"] + t["sum"] + ans[key]["num"] = ans[key]["num"] + t["num"] + else + ans[key] = t + end + + return ans; +end + +function test_finalize(ans, key) + local ret = 0.0 + + if (ans[key] ~= nil and ans[key]["num"] == 30000) + then + ret = ans[key]["sum"]/ans[key]["num"] + ans[key]["sum"] = 0.0 + ans[key]["num"] = 0 + else + ret = inf + end + + return ret, ans +end diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 9b61a33d45f69a200b3f364f1553bdec5a3f52b5..cde27d7dc3fa00258d9d9d50ee3dfc82f858b138 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -2,16 +2,18 @@ echo "Executing deploy.sh" -if [ $# != 4 ]; then +if [ $# != 4 ]; then echo "argument list need input : " echo " -n nodeName" echo " -i nodePort" exit 1 fi +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` NODE_NAME= NODE= -while getopts "n:i:" arg +while getopts "n:i:" arg do case $arg in n) @@ -29,7 +31,7 @@ done SCRIPT_DIR=`dirname $0` cd $SCRIPT_DIR/../ SCRIPT_DIR=`pwd` -echo "SCRIPT_DIR: $SCRIPT_DIR" +echo "SCRIPT_DIR: $SCRIPT_DIR" IN_TDINTERNAL="community" if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then @@ -41,10 +43,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/exec-default.sh b/tests/script/sh/exec-default.sh index 1c84a6b10ed3f958e096d32eda727441ff2591da..f648315c6745f63cfba9eb06ecfd53a9ccef1fed 100755 --- a/tests/script/sh/exec-default.sh +++ b/tests/script/sh/exec-default.sh @@ -1,6 +1,6 @@ #!/bin/bash -# if [ $# != 4 || $# != 5 ]; then +# if [ $# != 4 || $# != 5 ]; then # echo "argument list need input : " # echo " -n nodeName" # echo " -s start/stop" @@ -8,10 +8,12 @@ # exit 1 # fi +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` NODE_NAME= EXEC_OPTON= CLEAR_OPTION="false" -while getopts "n:s:u:x:ct" arg +while getopts "n:s:u:x:ct" arg do case $arg in n) @@ -52,10 +54,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/exec-no-random-fail.sh b/tests/script/sh/exec-no-random-fail.sh index 2bd0a64923f73df010e40f427d40cb9e212480d7..e01b18a8e6bb0d4631a28c64c6accd57bceb9076 100755 --- a/tests/script/sh/exec-no-random-fail.sh +++ b/tests/script/sh/exec-no-random-fail.sh @@ -1,6 +1,6 @@ #!/bin/bash -# if [ $# != 4 || $# != 5 ]; then +# if [ $# != 4 || $# != 5 ]; then # echo "argument list need input : " # echo " -n nodeName" # echo " -s start/stop" @@ -8,10 +8,12 @@ # exit 1 # fi +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` NODE_NAME= EXEC_OPTON= CLEAR_OPTION="false" -while getopts "n:s:u:x:ct" arg +while getopts "n:s:u:x:ct" arg do case $arg in n) @@ -52,10 +54,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build @@ -78,23 +86,23 @@ if [ -f "$TAOS_FLAG" ]; then EXE_DIR=/usr/local/bin/taos fi -if [ "$CLEAR_OPTION" = "clear" ]; then - echo rm -rf $MGMT_DIR $TSDB_DIR +if [ "$CLEAR_OPTION" = "clear" ]; then + echo rm -rf $MGMT_DIR $TSDB_DIR rm -rf $TSDB_DIR rm -rf $MGMT_DIR fi -if [ "$EXEC_OPTON" = "start" ]; then +if [ "$EXEC_OPTON" = "start" ]; then echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR - - if [ "$SHELL_OPTION" = "true" ]; then + + if [ "$SHELL_OPTION" = "true" ]; then TT=`date +%s` mkdir ${LOG_DIR}/${TT} - nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 & + nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 & else - nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 0 > /dev/null 2>&1 & + nohup $EXE_DIR/taosd -c $CFG_DIR --random-file-fail-factor 0 > /dev/null 2>&1 & fi - + else #relative path RCFG_DIR=sim/$NODE_NAME/cfg @@ -110,6 +118,6 @@ else fi sleep 1 PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'` - done + done fi diff --git a/tests/script/sh/exec-random-fail.sh b/tests/script/sh/exec-random-fail.sh index 3761498859591188799d1ad3814ed3b01372269d..1f31899e3ac26861fc1860d10cb0a4899ff7bf36 100755 --- a/tests/script/sh/exec-random-fail.sh +++ b/tests/script/sh/exec-random-fail.sh @@ -1,6 +1,6 @@ #!/bin/bash -# if [ $# != 4 || $# != 5 ]; then +# if [ $# != 4 || $# != 5 ]; then # echo "argument list need input : " # echo " -n nodeName" # echo " -s start/stop" @@ -8,10 +8,12 @@ # exit 1 # fi +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` NODE_NAME= EXEC_OPTON= CLEAR_OPTION="false" -while getopts "n:s:u:x:ct" arg +while getopts "n:s:u:x:ct" arg do case $arg in n) @@ -52,10 +54,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/exec.sh b/tests/script/sh/exec.sh index 1c84a6b10ed3f958e096d32eda727441ff2591da..80b8cda428da72daf55fc6e0d4c47867ce191d35 100755 --- a/tests/script/sh/exec.sh +++ b/tests/script/sh/exec.sh @@ -1,6 +1,6 @@ #!/bin/bash -# if [ $# != 4 || $# != 5 ]; then +# if [ $# != 4 || $# != 5 ]; then # echo "argument list need input : " # echo " -n nodeName" # echo " -s start/stop" @@ -8,10 +8,13 @@ # exit 1 # fi +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` + NODE_NAME= EXEC_OPTON= CLEAR_OPTION="false" -while getopts "n:s:u:x:ct" arg +while getopts "n:s:u:x:ct" arg do case $arg in n) @@ -52,10 +55,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/exec_tarbitrator.sh b/tests/script/sh/exec_tarbitrator.sh index 496b3e4ed6011ace67e5f33330d99bf02e611673..e985bd65856025b2db8dfef724fdc652b2a03392 100755 --- a/tests/script/sh/exec_tarbitrator.sh +++ b/tests/script/sh/exec_tarbitrator.sh @@ -1,13 +1,15 @@ #!/bin/bash -# if [ $# != 2 || $# != 3 ]; then +# if [ $# != 2 || $# != 3 ]; then # echo "argument list need input : " # echo " -s start/stop" # exit 1 # fi +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` EXEC_OPTON= -while getopts "n:s:u:x:ct" arg +while getopts "n:s:u:x:ct" arg do case $arg in n) @@ -49,10 +51,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/move_dnode.sh b/tests/script/sh/move_dnode.sh index d6dc4bc3eb24fe094067cffcb51a7c335a512a94..d3650c18ad0f49185ce1e1268273b8a44e3cdc14 100755 --- a/tests/script/sh/move_dnode.sh +++ b/tests/script/sh/move_dnode.sh @@ -2,10 +2,13 @@ echo "Executing move_dnode.sh" +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` + SCRIPT_DIR=`dirname $0` cd $SCRIPT_DIR/../ SCRIPT_DIR=`pwd` -echo "SCRIPT_DIR: $SCRIPT_DIR" +echo "SCRIPT_DIR: $SCRIPT_DIR" IN_TDINTERNAL="community" if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then @@ -17,10 +20,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/mv_old_data.sh b/tests/script/sh/mv_old_data.sh index 606ee171f1e6b8605fd19d542e0b0dce5dfdda28..3f4be6714fd757ee60d21ee97be5d411e3f95bdd 100755 --- a/tests/script/sh/mv_old_data.sh +++ b/tests/script/sh/mv_old_data.sh @@ -2,10 +2,13 @@ echo "Executing mv_old_data.sh" +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` + SCRIPT_DIR=`dirname $0` cd $SCRIPT_DIR/../ SCRIPT_DIR=`pwd` -echo "SCRIPT_DIR: $SCRIPT_DIR" +echo "SCRIPT_DIR: $SCRIPT_DIR" IN_TDINTERNAL="community" if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then @@ -17,10 +20,16 @@ fi TAOS_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TAOS_DIR/$BIN_DIR/build diff --git a/tests/script/sh/prepare_udf.sh b/tests/script/sh/prepare_udf.sh new file mode 100644 index 0000000000000000000000000000000000000000..a856b96c987ff63dd2c19a30ff007be9cd5b17f9 --- /dev/null +++ b/tests/script/sh/prepare_udf.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +echo -n "hello" > /tmp/normal +echo -n "" > /tmp/empty +dd if=/dev/zero bs=3584 of=/tmp/big count=1 + +rm -rf /tmp/sum_double.so /tmp/add_one.so +touch /tmp/normal + +gcc -g -O0 -fPIC -shared sh/sum_double.c -o /tmp/sum_double.so +gcc -g -O0 -fPIC -shared sh/add_one.c -o /tmp/add_one.so +gcc -g -O0 -fPIC -shared sh/demo.c -o /tmp/demo.so +gcc -g -O0 -fPIC -shared sh/abs_max.c -o /tmp/abs_max.so diff --git a/tests/script/sh/scalar_demo.lua b/tests/script/sh/scalar_demo.lua new file mode 100644 index 0000000000000000000000000000000000000000..cecaff90b01534316821b0a025a3fc66f9b219cf --- /dev/null +++ b/tests/script/sh/scalar_demo.lua @@ -0,0 +1,14 @@ +funcName = "test" + +ans = 1 + +function test_init() + return ans +end + +function test_add(rows, ans) + for i=1, #rows do + rows[i] = rows[i] + ans + end + return rows +end diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index 1a6e7153c39d1be84b2d8a4bf13338564d685a2f..430f39901e62415e780999171139fcd961cdd54c 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -1,24 +1,36 @@ #!/bin/sh +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` + PID=`ps -ef|grep /usr/bin/taosd | grep -v grep | awk '{print $2}'` -if [ -n "$PID" ]; then - echo systemctl stop taosd - systemctl stop taosd +if [ -n "$PID" ]; then + echo systemctl stop taosd + systemctl stop taosd fi - + PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` while [ -n "$PID" ]; do - echo kill -9 $PID - pkill -9 taosd - fuser -k -n tcp 6030 + echo kill -9 $PID + pkill -9 taosd + echo "Killing processes locking on port 6030" + if [[ "$OS_TYPE" != "Darwin" ]]; then + fuser -k -n tcp 6030 + else + lsof -nti:6030 | xargs kill -9 + fi PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` done PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'` while [ -n "$PID" ]; do - echo kill -9 $PID - pkill -9 tarbitrator - 
fuser -k -n tcp 6040 + echo kill -9 $PID + pkill -9 tarbitrator + if [[ "$OS_TYPE" != "Darwin" ]]; then + fuser -k -n tcp 6040 + else + lsof -nti:6040 | xargs kill -9 + fi PID=`ps -ef|grep -w tarbitrator | grep -v grep | awk '{print $2}'` done diff --git a/tests/script/sh/sum_double.c b/tests/script/sh/sum_double.c new file mode 100644 index 0000000000000000000000000000000000000000..b4b0b81a0677b7de102dcfd228d09e53e3edda62 --- /dev/null +++ b/tests/script/sh/sum_double.c @@ -0,0 +1,84 @@ +#include +#include +#include + +typedef struct SUdfInit{ + int maybe_null; /* 1 if function can return NULL */ + int decimals; /* for real functions */ + long long length; /* For string functions */ + char *ptr; /* free pointer for function data */ + int const_item; /* 0 if result is independent of arguments */ +} SUdfInit; + +#define TSDB_DATA_INT_NULL 0x80000000L + + +void sum_double(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput, + int* numOfOutput, short otype, short obytes, SUdfInit* buf) { + int i; + int r = 0; + printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf); + if (itype == 4) { + r=*(int *)dataOutput; + *numOfOutput=0; + + for(i=0;iptr)=*(int*)dataOutput*2; + *(int*)dataOutput=*(int*)(buf->ptr); + printf("sum_double finalize, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput); +} + +void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) { + int r = 0; + int sum = 0; + + printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf); + for (int i = 0; i < numOfRows; ++i) { + printf("sum_double_merge %d - %d\n", i, *((int*)data + i)); + sum +=*((int*)data + i); + } + + *(int*)dataOutput+=sum; + if (numOfRows > 0) { + *numOfOutput=1; + } else { + *numOfOutput=0; + } + + printf("sum_double_merge, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput); +} + + +int sum_double_init(SUdfInit* buf) { + buf->maybe_null=1; + buf->ptr = malloc(sizeof(int)); + printf("sum_double init\n"); + return 0; +} + + +void sum_double_destroy(SUdfInit* buf) { + free(buf->ptr); + printf("sum_double destroy\n"); +} + diff --git a/tests/script/test.sh b/tests/script/test.sh index a092a38a2d9491b49b64c80a42c74ebdd41236f3..29a15db4dc953818afa6de9abb3f4f8edc42173d 100755 --- a/tests/script/test.sh +++ b/tests/script/test.sh @@ -1,8 +1,8 @@ #!/bin/bash ################################################## -# -# Do simulation test +# +# Do simulation test # ################################################## @@ -14,6 +14,8 @@ RELEASE=0 ASYNC=0 VALGRIND=0 UNIQUE=0 +UNAME_BIN=`which uname` +OS_TYPE=`$UNAME_BIN` while getopts "f:avu" arg do case $arg in @@ -51,10 +53,16 @@ fi TOP_DIR=`pwd` TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` +if [[ "$OS_TYPE" != "Darwin" ]]; then + cut_opt="--field=" +else + cut_opt="-f " +fi + if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2,3` else - BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` + BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' ${cut_opt}2` fi BUILD_DIR=$TOP_DIR/$BIN_DIR/build @@ -117,7 +125,7 @@ echo "wal 0" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG echo "locale en_US.UTF-8" >> $TAOS_CFG echo "enableCoreFile 1" >> $TAOS_CFG -echo " " >> $TAOS_CFG +echo " " >> $TAOS_CFG ulimit -n 600000 ulimit -c unlimited diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim index 1d67a7f86b2aef94a90b74d3d0e6a07dc0cf3ef2..ae206744c4e93ab7cebd5f4db7d8d4b84ad5ebbb 100644 --- a/tests/script/unique/http/admin.sim +++ b/tests/script/unique/http/admin.sim @@ -34,6 +34,7 @@ print =============== step1 - login system_content curl 127.0.0.1:7111/admin/ print 1-> $system_content if $system_content != @{"status":"error","code":4357,"desc":"no auth info input"}@ then + print actual: $system_content return -1 endi @@ -149,6 +150,8 @@ endi system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' 127.0.0.1:7111/admin/all print curl 127.0.0.1:7111/admin/all -----> $system_content if $system_content != @{"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11}@ then + print actual: $system_content + print expect =======> {"status":"succ","head":["ts","i"],"data":[["2017-12-25 21:28:41.022",1],["2017-12-25 21:28:42.022",2],["2017-12-25 21:28:43.022",3],["2017-12-25 21:28:44.022",4],["2017-12-25 21:28:45.022",5],["2017-12-25 21:28:46.022",6],["2017-12-25 21:28:47.022",7],["2017-12-25 21:28:48.022",8],["2017-12-25 21:28:49.022",9],["2017-12-25 21:28:50.022",10],["2017-12-25 21:28:51.022",11]],"rows":11} return -1 endi diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt index 2eb8ee1614b286f3827705865cf073a7eded0c88..2702d192d3f47022f05888f90ca89c4ef533fe44 100644 --- a/tests/test/c/CMakeLists.txt +++ b/tests/test/c/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h index 58314d2e5055f0716793342157f6c82d6d729b29..2e19dde3d9c52c20705d131f471a2e0e389589e4 100644 --- a/tests/tsim/inc/sim.h +++ b/tests/tsim/inc/sim.h @@ -87,6 +87,8 @@ enum { SIM_CMD_RESTFUL, SIM_CMD_TEST, SIM_CMD_RETURN, + SIM_CMD_LINE_INSERT, + SIM_CMD_LINE_INSERT_ERROR, SIM_CMD_END }; @@ -172,6 +174,8 @@ bool simExecuteSqlCmd(SScript *script, char *option); bool simExecuteSqlErrorCmd(SScript *script, char *rest); bool simExecuteSqlSlowCmd(SScript *script, char *option); bool simExecuteRestfulCmd(SScript *script, char *rest); +bool simExecuteLineInsertCmd(SScript *script, char *option); +bool simExecuteLineInsertErrorCmd(SScript *script, char *option); void simVisuallizeOption(SScript *script, char *src, char *dst); #endif \ No newline at end of file diff --git a/tests/tsim/src/simExe.c b/tests/tsim/src/simExe.c index 7d74946e939bb5f34f81ef6a6aee56a31c4a6cfe..a05f46ce0de54628f289c937e959ccc3337e00a9 100644 --- a/tests/tsim/src/simExe.c +++ b/tests/tsim/src/simExe.c @@ -1067,3 +1067,49 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) { return false; } + +bool simExecuteLineInsertCmd(SScript 
*script, char *rest) { + char buf[TSDB_MAX_BINARY_LEN]; + + simVisuallizeOption(script, rest, buf); + rest = buf; + + SCmdLine *line = &script->lines[script->linePos]; + + simInfo("script:%s, %s", script->fileName, rest); + simLogSql(buf, true); + char * lines[] = {rest}; + int32_t ret = taos_insert_lines(script->taos, lines, 1); + if (ret == TSDB_CODE_SUCCESS) { + simDebug("script:%s, taos:%p, %s executed. success.", script->fileName, script->taos, rest); + script->linePos++; + return true; + } else { + sprintf(script->error, "lineNum: %d. line: %s failed, ret:%d:%s", line->lineNum, rest, + ret & 0XFFFF, tstrerror(ret)); + return false; + } +} + +bool simExecuteLineInsertErrorCmd(SScript *script, char *rest) { + char buf[TSDB_MAX_BINARY_LEN]; + + simVisuallizeOption(script, rest, buf); + rest = buf; + + SCmdLine *line = &script->lines[script->linePos]; + + simInfo("script:%s, %s", script->fileName, rest); + simLogSql(buf, true); + char * lines[] = {rest}; + int32_t ret = taos_insert_lines(script->taos, lines, 1); + if (ret == TSDB_CODE_SUCCESS) { + sprintf(script->error, "script:%s, taos:%p, %s executed. expect failed, but success.", script->fileName, script->taos, rest); + script->linePos++; + return false; + } else { + simDebug("lineNum: %d. line: %s failed, ret:%d:%s. Expect failed, so success", line->lineNum, rest, + ret & 0XFFFF, tstrerror(ret)); + return true; + } +} diff --git a/tests/tsim/src/simMain.c b/tests/tsim/src/simMain.c index 6a9d96bc3b2e6056c73fad73b1fa9f1b7dee6cbf..7d74c62c7daf391fed1bf1afac233f51b84c8f0b 100644 --- a/tests/tsim/src/simMain.c +++ b/tests/tsim/src/simMain.c @@ -35,7 +35,7 @@ int32_t main(int32_t argc, char *argv[]) { for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0 && i < argc - 1) { - tstrncpy(configDir, argv[++i], MAX_FILE_NAME_LEN); + tstrncpy(configDir, argv[++i], 128); } else if (strcmp(argv[i], "-f") == 0 && i < argc - 1) { strcpy(scriptFile, argv[++i]); } else if (strcmp(argv[i], "-a") == 0) { @@ -75,4 +75,4 @@ int32_t main(int32_t argc, char *argv[]) { simInfo("execute result %d", ret); return ret; -} \ No newline at end of file +} diff --git a/tests/tsim/src/simParse.c b/tests/tsim/src/simParse.c index b909f5bd8fc10bea09afd65dc504ae35d6de3505..1acdcd2ac6eb0ecb66e2977dee7577393ed242ef 100644 --- a/tests/tsim/src/simParse.c +++ b/tests/tsim/src/simParse.c @@ -838,6 +838,38 @@ bool simParseRunBackCmd(char *rest, SCommand *pCmd, int32_t lineNum) { return true; } +bool simParseLineInsertCmd(char* rest, SCommand* pCmd, int32_t lineNum) { + int32_t expLen; + + rest++; + cmdLine[numOfLines].cmdno = SIM_CMD_LINE_INSERT; + cmdLine[numOfLines].lineNum = lineNum; + cmdLine[numOfLines].optionOffset = optionOffset; + expLen = (int32_t)strlen(rest); + memcpy(optionBuffer + optionOffset, rest, expLen); + optionOffset += expLen + 1; + *(optionBuffer + optionOffset - 1) = 0; + + numOfLines++; + return true; +} + +bool simParseLineInsertErrorCmd(char* rest, SCommand* pCmd, int32_t lineNum) { + int32_t expLen; + + rest++; + cmdLine[numOfLines].cmdno = SIM_CMD_LINE_INSERT; + cmdLine[numOfLines].lineNum = lineNum; + cmdLine[numOfLines].optionOffset = optionOffset; + expLen = (int32_t)strlen(rest); + memcpy(optionBuffer + optionOffset, rest, expLen); + optionOffset += expLen + 1; + *(optionBuffer + optionOffset - 1) = 0; + + numOfLines++; + return true; +} + void simInitsimCmdList() { int32_t cmdno; memset(simCmdList, 0, SIM_CMD_END * sizeof(SCommand)); @@ -1049,4 +1081,20 @@ void simInitsimCmdList() { simCmdList[cmdno].parseCmd = simParseReturnCmd; 
simCmdList[cmdno].executeCmd = simExecuteReturnCmd; simAddCmdIntoHash(&(simCmdList[cmdno])); + + cmdno = SIM_CMD_LINE_INSERT; + simCmdList[cmdno].cmdno = cmdno; + strcpy(simCmdList[cmdno].name, "line_insert"); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].parseCmd = simParseLineInsertCmd; + simCmdList[cmdno].executeCmd = simExecuteLineInsertCmd; + simAddCmdIntoHash(&(simCmdList[cmdno])); + + cmdno = SIM_CMD_LINE_INSERT_ERROR; + simCmdList[cmdno].cmdno = cmdno; + strcpy(simCmdList[cmdno].name, "line_insert_error"); + simCmdList[cmdno].nlen = (int16_t)strlen(simCmdList[cmdno].name); + simCmdList[cmdno].parseCmd = simParseLineInsertErrorCmd; + simCmdList[cmdno].executeCmd = simExecuteLineInsertErrorCmd; + simAddCmdIntoHash(&(simCmdList[cmdno])); }