diff --git a/.gitmodules b/.gitmodules index dbb02d4ef7ed65d11418e271cac7e61b95c2a482..7edcdff5d3dd805ec6b222915688940c7bd7dcb9 100644 --- a/.gitmodules +++ b/.gitmodules @@ -16,6 +16,9 @@ [submodule "deps/TSZ"] path = deps/TSZ url = https://github.com/taosdata/TSZ.git -[submodule "src/plugins/blm3"] - path = src/plugins/blm3 - url = https://github.com/taosdata/blm3 +[submodule "deps/avro"] + path = deps/avro + url = https://github.com/apache/avro +[submodule "src/plugins/taosadapter"] + path = src/plugins/taosadapter + url = https://github.com/taosdata/taosadapter diff --git a/CMakeLists.txt b/CMakeLists.txt index 75f98f96bcb26ae12fd32b56f2533db3001c6ae5..547455d07b6ba25ac58ae5e4851c5cd5b08e3c60 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,6 +15,26 @@ ELSE () CMAKE_MINIMUM_REQUIRED(VERSION 3.0) ENDIF () +if(NOT WIN32) + string(ASCII 27 Esc) + set(ColourReset "${Esc}[m") + set(ColourBold "${Esc}[1m") + set(Red "${Esc}[31m") + set(Green "${Esc}[32m") + set(Yellow "${Esc}[33m") + set(Blue "${Esc}[34m") + set(Magenta "${Esc}[35m") + set(Cyan "${Esc}[36m") + set(White "${Esc}[37m") + set(BoldRed "${Esc}[1;31m") + set(BoldGreen "${Esc}[1;32m") + set(BoldYellow "${Esc}[1;33m") + set(BoldBlue "${Esc}[1;34m") + set(BoldMagenta "${Esc}[1;35m") + set(BoldCyan "${Esc}[1;36m") + set(BoldWhite "${Esc}[1;37m") +endif() + SET(TD_ACCOUNT FALSE) SET(TD_ADMIN FALSE) SET(TD_GRANT FALSE) diff --git a/Jenkinsfile b/Jenkinsfile index f0f3e0d122ad470cce0ef9586e01fe9431ccfa8d..9cc65d24f8aae3a97890e6676ff1091d32f7dc59 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -107,7 +107,147 @@ def pre_test(){ make > /dev/null make install > /dev/null cd ${WKC}/tests - pip3 install ${WKC}/src/connector/python/ || echo "not install" + pip3 install ${WKC}/src/connector/python/ + ''' + return 1 +} +def pre_test_noinstall(){ + sh'hostname' + sh''' + cd ${WKC} + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WKC} + git checkout master + ''' + } + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WKC} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WKC} + git checkout develop + ''' + } + } + sh''' + cd ${WKC} + git pull >/dev/null + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + git clean -dfx + git submodule update --init --recursive + cd ${WK} + git reset --hard HEAD~10 + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WK} + git checkout master + ''' + } + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WK} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WK} + git checkout develop + ''' + } + } + sh ''' + cd ${WK} + git pull >/dev/null + + export TZ=Asia/Harbin + date + git clean -dfx + mkdir debug + cd debug + cmake .. 
> /dev/null + make + ''' + return 1 +} +def pre_test_mac(){ + sh'hostname' + sh''' + cd ${WKC} + git reset --hard HEAD~10 >/dev/null + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WKC} + git checkout master + ''' + } + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WKC} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WKC} + git checkout develop + ''' + } + } + sh''' + cd ${WKC} + git pull >/dev/null + git fetch origin +refs/pull/${CHANGE_ID}/merge + git checkout -qf FETCH_HEAD + git clean -dfx + git submodule update --init --recursive + cd ${WK} + git reset --hard HEAD~10 + ''' + script { + if (env.CHANGE_TARGET == 'master') { + sh ''' + cd ${WK} + git checkout master + ''' + } + else if(env.CHANGE_TARGET == '2.0'){ + sh ''' + cd ${WK} + git checkout 2.0 + ''' + } + else{ + sh ''' + cd ${WK} + git checkout develop + ''' + } + } + sh ''' + cd ${WK} + git pull >/dev/null + + export TZ=Asia/Harbin + date + git clean -dfx + mkdir debug + cd debug + cmake .. > /dev/null + cmake --build . ''' return 1 } @@ -460,31 +600,61 @@ pipeline { stage('arm64centos7') { agent{label " arm64centos7 "} steps { - pre_test() + pre_test_noinstall() } } stage('arm64centos8') { agent{label " arm64centos8 "} steps { - pre_test() + pre_test_noinstall() } } stage('arm32bionic') { agent{label " arm32bionic "} steps { - pre_test() + pre_test_noinstall() } } stage('arm64bionic') { agent{label " arm64bionic "} steps { - pre_test() + pre_test_noinstall() } } stage('arm64focal') { agent{label " arm64focal "} steps { - pre_test() + pre_test_noinstall() + } + } + stage('centos7') { + agent{label " centos7 "} + steps { + pre_test_noinstall() + } + } + stage('ubuntu:trusty') { + agent{label " trusty "} + steps { + pre_test_noinstall() + } + } + stage('ubuntu:xenial') { + agent{label " xenial "} + steps { + pre_test_noinstall() + } + } + stage('ubuntu:bionic') { + agent{label " bionic "} + steps { + pre_test_noinstall() + } + } + stage('Mac_build') { + agent{label " catalina "} + steps { + pre_test_mac() } } diff --git a/README.md b/README.md index c821bdc031fc3125e7afdfd2f8a9c2878e51f505..edca04afd486687ea8653e955ae50da457f77ab9 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ mkdir debug && cd debug cmake .. && cmake --build . ``` -Note TDengine 2.3.0.0 and later use a component named 'blm3' to play http daemon role by default instead of the http daemon embedded in the early version of TDengine. The blm3 is programmed by go language. If you pull TDengine source code to the latest from an existing codebase, please execute 'git submodule update --init --recursive' to pull blm3 source code. Please install go language 1.14 or above for compiling blm3. If you meet difficulties regarding 'go mod', especially you are from China, you can use a proxy to solve the problem. +Note TDengine 2.3.x.0 and later use a component named 'taosadapter' to play http daemon role by default instead of the http daemon embedded in the early version of TDengine. The taosadapter is programmed by go language. If you pull TDengine source code to the latest from an existing codebase, please execute 'git submodule update --init --recursive' to pull taosadapter source code. Please install go language 1.14 or above for compiling taosadapter. If you meet difficulties regarding 'go mod', especially you are from China, you can use a proxy to solve the problem. 
``` go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.cn,direct diff --git a/cmake/define.inc b/cmake/define.inc index bb6b285f268a6476c79fb599e76b1fd0435173b5..b381853eba57aa7b9efb905790e77b1d1fdcf900 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) IF (TD_ACCOUNT) @@ -121,14 +121,13 @@ IF (TD_MIPS_32) SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () -IF (TD_APLHINE) +IF (TD_ALPINE) SET(COMMON_FLAGS "${COMMON_FLAGS} -largp") link_libraries(/usr/lib/libargp.a) ADD_DEFINITIONS(-D_ALPINE) MESSAGE(STATUS "aplhine is defined") ENDIF () -MESSAGE("before BUILD_HTTP: " ${BUILD_HTTP}) IF ("${BUILD_HTTP}" STREQUAL "") IF (TD_LINUX) IF (TD_ARM_32) @@ -140,16 +139,27 @@ IF ("${BUILD_HTTP}" STREQUAL "") SET(BUILD_HTTP "true") ENDIF () ENDIF () -MESSAGE("after BUILD_HTTP: " ${BUILD_HTTP}) IF (${BUILD_HTTP} MATCHES "true") SET(TD_BUILD_HTTP TRUE) +ELSEIF (${BUILD_HTTP} MATCHES "false") + SET(TD_BUILD_HTTP FALSE) ENDIF () IF (TD_BUILD_HTTP) ADD_DEFINITIONS(-DHTTP_EMBEDDED) ENDIF () +IF ("${AVRO_SUPPORT}" MATCHES "true") + SET(TD_AVRO_SUPPORT TRUE) +ELSEIF ("${AVRO_SUPPORT}" MATCHES "false") + SET(TD_AVRO_SUPPORT FALSE) +ENDIF () + +IF (TD_AVRO_SUPPORT) + ADD_DEFINITIONS(-DAVRO_SUPPORT) +ENDIF () + IF (TD_LINUX) ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-D_LINUX) @@ -162,11 +172,14 @@ IF (TD_LINUX) ENDIF () IF (TD_MEMORY_SANITIZER) - SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG") - MESSAGE(STATUS "memory sanitizer detected as true") + IF (TD_ARCHLINUX) + SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG") + ELSE () + SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG") + ENDIF () + MESSAGE(STATUS "${BoldRed}Will compile with memory sanitizer! 
${ColourReset}") ELSE () SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG") - MESSAGE(STATUS "memory sanitizer detected as false") ENDIF () SET(RELEASE_FLAGS "-O3 -Wno-error") diff --git a/cmake/env.inc b/cmake/env.inc index 5ee0b2983c0394c3e3aad26a622bdd2e6247c4be..1c594cd4be229cf259d76f9612b35fafde46221c 100755 --- a/cmake/env.inc +++ b/cmake/env.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) SET(CMAKE_C_STANDARD 11) diff --git a/cmake/input.inc b/cmake/input.inc index 5bd1a7bed6fe9b0c7dc51c46870d8109462eae81..0812711a5824ce0b328374fcdd04fc5f229ad01c 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) IF (${ACCOUNT} MATCHES "true") @@ -92,6 +92,8 @@ ENDIF () SET(TD_BUILD_HTTP FALSE) +SET(TD_AVRO_SUPPORT FALSE) + SET(TD_MEMORY_SANITIZER FALSE) IF (${MEMORY_SANITIZER} MATCHES "true") SET(TD_MEMORY_SANITIZER TRUE) diff --git a/cmake/install.inc b/cmake/install.inc index 9ecd9bcd4fa722dd039170ef30220679cedf65b1..c90aa3f9511e416106309e603853028e7096f082 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -35,7 +35,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.35-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.36-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/platform.inc b/cmake/platform.inc index a78082a1fc62a8ad66c54dcf005e3e15edf5f5f0..328c5f23ee95af54daa7e4a925c33ce09acd3cfb 100755 --- a/cmake/platform.inc +++ b/cmake/platform.inc @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) # @@ -21,7 +21,7 @@ SET(TD_LINUX FALSE) SET(TD_ARM_32 FALSE) SET(TD_MIPS_64 FALSE) SET(TD_MIPS_32 FALSE) - SET(TD_APLHINE FALSE) + SET(TD_ALPINE FALSE) SET(TD_NINGSI FALSE) SET(TD_NINGSI_60 FALSE) SET(TD_NINGSI_80 FALSE) @@ -36,7 +36,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") # Get OS information and store in variable TD_OS_INFO. 
# execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh) - execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO) + execute_process(COMMAND sh ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO) MESSAGE(STATUS "The current os is " ${TD_OS_INFO}) SET(TD_LINUX TRUE) @@ -52,8 +52,13 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") ENDIF () IF (${TD_OS_INFO} MATCHES "Alpine") - SET(TD_APLHINE TRUE) - MESSAGE(STATUS "The current OS is Alpine, append extra flags") + SET(TD_ALPINE TRUE) + MESSAGE(STATUS "The current OS is Alpine Linux, append extra flags") + ELSEIF (${TD_OS_INFO} MATCHES "Arch") + SET(TD_ARCHLINUX TRUE) + MESSAGE(STATUS "The current OS is Arch Linux") + ELSE () + MESSAGE(STATUS "Ths distro is " ${TD_OS_INFO}) ENDIF() ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") SET(TD_DARWIN TRUE) @@ -155,7 +160,7 @@ ELSEIF (${OSTYPE} MATCHES "Linux") MESSAGE(STATUS "input osType: Linux") ELSEIF (${OSTYPE} MATCHES "Alpine") MESSAGE(STATUS "input osType: Alpine") - SET(TD_APLHINE TRUE) + SET(TD_ALPINE TRUE) ELSE () MESSAGE(STATUS "The user specified osType is unknown: " ${OSTYPE}) ENDIF () diff --git a/cmake/version.inc b/cmake/version.inc index 1d3b25e9237ef507811fa234dda4211acd6eb885..94ff39f5e655d89b16b57a4b8c8fbe275c82a49a 100755 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -1,10 +1,10 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20) +CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.3.0.0") + SET(TD_VER_NUMBER "2.3.1.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index 45828245e2d541114a2ae0a287e0c6acbd0d42be..773a791a2527712270f569d5c04aa7f8ef066e40 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT) ADD_SUBDIRECTORY(MQTT-C) ENDIF () +IF (TD_AVRO_SUPPORT) + MESSAGE("") + MESSAGE("${Green} ENABLE avro format support ${ColourReset}") + MESSAGE("") + include(ExternalProject) + ExternalProject_Add( + apache-avro + PREFIX "avro" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c + BUILD_IN_SOURCE 1 + PATCH_COMMAND + COMMAND git clean -f -d + COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt + CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build + ) +ELSE () + MESSAGE("") + MESSAGE("${Yellow} NO avro format support ${ColourReset}") + MESSAGE("") +ENDIF () + IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + MESSAGE("") + MESSAGE("${Green} ENABLE jemalloc ${ColourReset}") + MESSAGE("") MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR}) MESSAGE("binary dir:" ${CMAKE_BINARY_DIR}) - include(ExternalProject) ExternalProject_Add(jemalloc PREFIX "jemalloc" SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc @@ -39,5 +65,5 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED) ENDIF () IF (${TSZ_ENABLED} MATCHES "true") - ADD_SUBDIRECTORY(TSZ) -ENDIF() \ No newline at end of file + ADD_SUBDIRECTORY(TSZ) +ENDIF() diff --git a/deps/TSZ b/deps/TSZ deleted file mode 160000 index 
0ca5b15a8eac40327dd737be52c926fa5675712c..0000000000000000000000000000000000000000 --- a/deps/TSZ +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c diff --git a/deps/avro b/deps/avro new file mode 160000 index 0000000000000000000000000000000000000000..a1fce29d9675b4dd95dfee9db32cc505d0b2227c --- /dev/null +++ b/deps/avro @@ -0,0 +1 @@ +Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index a16154443c96cfd31cbc7c5d4b49caf3ccbeab9e..70a6b7c5281e1a96f8348ff3a3bb81892b80c93c 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -81,6 +81,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [Node.js Connector](/connector#nodejs):给node应用提供一个连接TDengine服务器的驱动 * [C# Connector](/connector#csharp):给C#应用提供一个连接TDengine服务器的驱动 * [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它 +* [Rust Connector](/connector/rust): Rust语言下通过libtaos客户端或RESTful接口,连接TDengine服务器。 ## [与其他工具的连接](/connections) diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md index fee6708d3a51fa71fed64e31ade72a8dac05b259..3f91dbb35130a2ff78e5ef23219b79433af33ce3 100644 --- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md +++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md @@ -145,7 +145,7 @@ insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms ``` $ taosdemo --help --f, --file=FILE The meta file to the execution procedure. +-f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only. -u, --user=USER The user name to use when connecting to the server. -p, --password The password to use when connecting to the server. -c, --config-dir=CONFIG_DIR Configuration directory. @@ -442,7 +442,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维 taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用json格式的配置文件。 一、命令行参数 --f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。 +-f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。 -u: 用户名。可选项,缺省是“root“。 diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md index 9a0e9b388e639d5e6c6e5094682f07a223c01ada..a82aecd97c832f9b7f276ec27832097e46845dfc 100644 --- a/documentation20/cn/05.insert/docs.md +++ b/documentation20/cn/05.insert/docs.md @@ -27,13 +27,18 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。 -## Schemaless 写入 +## 无模式(Schemaless)写入 +**前言** +
In IoT applications, many data items are usually collected to implement intelligent control, business analytics, device monitoring, and so on. Because of version upgrades of the application logic, or hardware adjustments of the devices themselves, the set of collected data items may change frequently. To make data recording convenient in such situations, TDengine provides schemaless writing starting from version 2.2.0.0: there is no need to create super tables or subtables in advance, because the write interface automatically creates the storage structures that match the data as it is written. When necessary, schemaless writing also adds the required data columns automatically, ensuring that the data written by the user is stored correctly.
+Currently, TDengine's C/C++ connector provides the operation interface for schemaless writing; see the [schemaless write API](https://www.taosdata.com/cn/documentation/connector#schemaless) section for details. The data representation format of schemaless writing is described here.
+Super tables and their subtables created through schemaless writing are exactly the same as super tables and subtables created directly via SQL, and you can also write data into them directly with SQL statements. Note, however, that the names of tables created through schemaless writing are generated from tag values according to a fixed mapping rule, so they carry no explicit meaning and are not very readable.
-In IoT applications, many data items are usually collected to implement intelligent control, business analytics, device monitoring, and so on. Because of version upgrades of the application logic, or hardware adjustments of the devices themselves, the set of collected data items may change frequently. To make data recording convenient in such situations, TDengine provides schemaless writing starting from version 2.2.0.0, which avoids creating super tables/subtables in advance and instead creates the matching storage structures automatically as data is written. When necessary, schemaless writing also adds the required data columns automatically, ensuring that the data written by the user is stored correctly. Currently, TDengine's C/C++ connector provides the operation interface for schemaless writing; see the [schemaless write API](https://www.taosdata.com/cn/documentation/connector#schemaless) section for details. The data representation format of schemaless writing is described here.
+**Schemaless write line protocol**
+TDengine's schemaless line protocol is compatible with the InfluxDB Line Protocol, the OpenTSDB telnet line protocol, and the OpenTSDB JSON protocol. When any of these three protocols is used, the API must be told which protocol standard to parse the input with.
-### Schemaless data line protocol
+For the standard InfluxDB and OpenTSDB write protocols, please refer to their respective documentation. Starting from the InfluxDB line protocol, the following describes the protocol extensions TDengine adds, which let users control the (super table) schema in a finer-grained way.
-Schemaless uses one string to express the final stored data row (multiple strings can be passed to the schemaless write API at once to batch-write multiple rows), with the following agreed format:
+Schemaless uses one string to express one data row (multiple lines can be passed to the write API at once to batch-write multiple rows), with the following agreed format:
 ```json
 measurement,tag_set field_set timestamp
 ```
@@ -44,51 +49,104 @@ measurement,tag_set field_set timestamp
 * field_set holds the data of regular columns, in the form `<field_key>=<field_value>,<field_key>=<field_value>`, likewise using commas to separate multiple regular columns; it is separated from timestamp by one half-width space.
 * timestamp is the primary-key timestamp of this row.
-In the schemaless data line protocol, every item in tag_set and field_set needs to describe its own data type. Specifically:
+All data in tag_set is automatically converted to the nchar data type and does not need double quotes (").
+In the schemaless write line protocol, every item in field_set needs to describe its own data type. Specifically:
 * A value enclosed in double quotes denotes the BINARY(32) type, for example `"abc"`.
 * A value enclosed in double quotes with an L prefix denotes the NCHAR(32) type, for example `L"报错信息"`.
 * Spaces, equal signs (=), commas (,), and double quotes (") must be escaped with a preceding backslash (\). (All of these refer to half-width ASCII characters.)
 * Numeric types are distinguished by suffix:
-  - no suffix means FLOAT;
-  - the f32 suffix means FLOAT;
-  - the f64 suffix means DOUBLE;
-  - the i8 suffix means TINYINT (INT8);
-  - the i16 suffix means SMALLINT (INT16);
-  - the i32 suffix means INT (INT32);
-  - the i64 suffix means BIGINT (INT64);
+
+| **No.** | **Suffix** | **Mapped type** | **Size (bytes)** |
+| -- | ------- | ---------| ------ |
+| 1 | none or f64 | DOUBLE | 8 |
+| 2 | f32 | FLOAT | 4 |
+| 3 | i8 | TINYINT | 1 |
+| 4 | i16 | SMALLINT | 2 |
+| 5 | i32 | INT | 4 |
+| 6 | i64 or i | BIGINT | 8 |
 * t, T, true, True, TRUE, f, F, false, False are treated directly as BOOL values.
+For example, the following data row writes, into the subtable of super table st whose t1 tag is "3" (NCHAR), t2 tag is "4" (NCHAR), and t3 tag is "t3" (NCHAR), one row where column c1 is 3 (BIGINT), c2 is false (BOOL), c3 is "passit" (BINARY), c4 is 4 (DOUBLE), and the primary-key timestamp is 1626006833639000000.
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
+```
+Note that using the wrong letter case in a data type suffix, or specifying an invalid data type for a value, can trigger an error message and cause the write to fail.
-The timestamp declares its time precision through a suffix. Specifically:
-* a long integer without any suffix is treated as microseconds;
-* the s suffix denotes a timestamp in seconds;
-* the ms suffix denotes a timestamp in milliseconds;
-* the us suffix denotes a timestamp in microseconds;
-* the ns suffix denotes a timestamp in nanoseconds;
-* a timestamp of 0 means the client's current time (therefore, within one submitted batch, timestamp 0 is interpreted as the same point in time, which may lead to duplicate timestamps).
+### Main processing logic of schemaless writing
-For example, the following schemaless data row writes, into the subtable of super table st whose t1 tag is 3 (BIGINT), t2 tag is 4 (DOUBLE), and t3 tag is "t3" (BINARY), one row where column c1 is 3 (BIGINT), c2 is false (BOOL), c3 is "passit" (NCHAR), c4 is 4 (DOUBLE), and the primary-key timestamp is 1626006833639000000 (nanosecond precision).
+Schemaless writing processes row data according to the following rules:
+1. When tag_set contains an ID field, the value of that field is used as the subtable name.
+2. Without an ID field, the subtable name is generated by the following rule (see the C sketch after this list):
+first combine the measurement name with the tag keys and values into a string of the form
+```json
+"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
+```
+Note that tag_key1, tag_key2 here do not follow the original order in which the user entered the tags; the tag names are sorted in ascending string order, so tag_key1 is not necessarily the first tag entered in the line protocol.
+After sorting, the MD5 hash value "md5_val" of this string is computed, and the result is combined with a prefix to produce the table name "t_md5_val". The "t_" prefix is fixed, and every table generated automatically through this mapping carries it.
+3. If the super table derived from parsing the line protocol does not exist, it is created.
+4. If the subtable derived from parsing the line protocol does not exist, schemaless writing creates it using the subtable name determined in step 1 or 2.
+5. If a tag column or regular column specified in the data row does not exist, the corresponding column is added to the super table (columns are only ever added, never removed).
+6. If the super table has tag columns or regular columns for which a data row specifies no value, those columns are set to NULL for that row.
+7. For BINARY or NCHAR columns, if the length of a value in a data row exceeds the column's type limit, the maximum length the column can store is increased automatically (only ever increased, never decreased) so the data is stored intact.
+8. If the specified subtable already exists and the tag values specified this time differ from the stored values, the values in the newest data row overwrite the old tag values.
+9. Any error encountered during processing interrupts the write and returns an error code.
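+
+The following C sketch illustrates the subtable-name mapping of rule 2. It is a minimal approximation only: it assumes the conventional hex encoding of the MD5 digest and uses OpenSSL for MD5, and the `subtable_name` helper is hypothetical — TDengine performs this mapping internally.
+```c
+#include <openssl/md5.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static int cmp_str(const void *a, const void *b) {
+    return strcmp(*(const char *const *)a, *(const char *const *)b);
+}
+
+/* tags are "key=value" strings; sorting them with strcmp orders them by key. */
+static void subtable_name(const char *measurement, const char **tags,
+                          size_t ntags, char *out /* >= 35 bytes */) {
+    const char **sorted = malloc(ntags * sizeof(*sorted));
+    memcpy(sorted, tags, ntags * sizeof(*sorted));
+    qsort(sorted, ntags, sizeof(*sorted), cmp_str);
+
+    /* build "measurement,tag_key1=tag_value1,tag_key2=tag_value2" */
+    char buf[1024];
+    snprintf(buf, sizeof(buf), "%s", measurement);
+    for (size_t i = 0; i < ntags; i++) {
+        strncat(buf, ",", sizeof(buf) - strlen(buf) - 1);
+        strncat(buf, sorted[i], sizeof(buf) - strlen(buf) - 1);
+    }
+
+    unsigned char md[MD5_DIGEST_LENGTH];
+    MD5((const unsigned char *)buf, strlen(buf), md);
+
+    /* fixed "t_" prefix plus the hex-encoded digest */
+    strcpy(out, "t_");
+    for (int i = 0; i < MD5_DIGEST_LENGTH; i++)
+        sprintf(out + 2 + 2 * i, "%02x", md[i]);
+    free(sorted);
+}
+
+int main(void) {
+    const char *tags[] = {"t2=4", "t1=3", "t3=t3"}; /* deliberately unsorted */
+    char name[35];
+    subtable_name("st", tags, 3, name);
+    printf("%s\n", name); /* t_ followed by 32 hex characters */
+    return 0;
+}
+```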
+
+**Notes:**
+All schemaless processing logic still follows TDengine's underlying limits on data structures; for example, the total length of one data row cannot exceed 16 KB. See the [TAOS SQL boundary limits](https://www.taosdata.com/cn/documentation/taos-sql#limitation) section for the specific constraints.
+
+**Time resolution recognition**
+Schemaless writing supports three specified protocol modes, as follows:
+
+| **No.** | **Value** | **Description** |
+| ---- | ------------------- | ------------ |
+| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB telnet line protocol |
+| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol format |
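+
+For reference, hypothetical single-record payloads for the two OpenTSDB-compatible modes above could look as follows — the first line is a telnet-style record (SML_TELNET_PROTOCOL), the second a JSON record (SML_JSON_PROTOCOL); both follow the upstream OpenTSDB formats, so consult the OpenTSDB documentation for the exact rules:
+```json
+sys.cpu.usage 1626006833 89.5 host=web01 cpu=0
+{"metric": "sys.cpu.usage", "timestamp": 1626006833, "value": 89.5, "tags": {"host": "web01", "cpu": "0"}}
+```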
+
+In SML_LINE_PROTOCOL parsing mode, the user must specify the time resolution of the input timestamps. The available time resolutions are shown in the following table:
+
+| **No.** | **Time resolution definition** | **Meaning** |
+| ---- | ----------------------------- | --------- |
+| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | not configured (invalid) |
+| 2 | TSDB_SML_TIMESTAMP_HOURS | hours |
+| 3 | TSDB_SML_TIMESTAMP_MINUTES | minutes |
+| 4 | TSDB_SML_TIMESTAMP_SECONDS | seconds |
+| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | milliseconds |
+| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | microseconds |
+| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | nanoseconds |
+
+In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined from the length of the timestamp (the same as the standard OpenTSDB behavior), and any user-specified time resolution is ignored.
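+
+Putting the protocol mode and time resolution together, a schemaless write through the C connector could look like the following sketch. It assumes the `taos_schemaless_insert()` entry point and the constants named above; check your taos.h and the connector documentation for the exact signature.
+```c
+#include <stdio.h>
+#include <taos.h>
+
+int main(void) {
+    TAOS *taos = taos_connect("localhost", "root", "taosdata", "demo", 6030);
+    if (taos == NULL) return 1;
+
+    char *lines[] = {
+        /* InfluxDB-style line protocol with a nanosecond timestamp */
+        "st,t1=3,t2=4,t3=t3 c1=3i64,c3=\"passit\",c2=false,c4=4f64 1626006833639000000",
+    };
+    TAOS_RES *res = taos_schemaless_insert(taos, lines, 1,
+                                           TSDB_SML_LINE_PROTOCOL,
+                                           TSDB_SML_TIMESTAMP_NANO_SECONDS);
+    if (taos_errno(res) != 0) {
+        fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
+    }
+    taos_free_result(res);
+    taos_close(taos);
+    return 0;
+}
+```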
+
+**Data schema change handling**
+This section explains how writing different rows affects the data schema.
+
+If a field's type is explicitly identified when it is written through the line protocol, changing that field's type definition later produces an explicit schema error, i.e. the write API reports an error. As shown below,
 ```json
-st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
 ```
+the type mapping of the first row defines column c4 as DOUBLE, but the second row declares the column as BIGINT through its numeric suffix, which triggers a schemaless parsing error.
-Note that using the wrong letter case in a data type suffix, or specifying an invalid data type for a value, can trigger an error message and cause the write to fail.
+If earlier line-protocol rows declared a data column as BINARY and a later row requires a longer BINARY value, the super table schema is changed:
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
+```
+Parsing the first row declares column c5 as a BINARY(4) field; the second write finds that c5 is still a BINARY column but now of width 6, so the BINARY width is increased to fit the new string.
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
+```
+Compared with the first row, the second row adds a column c6 of type BINARY(6); a column c6 of type BINARY(6) is therefore added automatically.
-### Schemaless processing logic
+
+**Write integrity**
+TDengine guarantees idempotency for data writes, i.e. you can call the API repeatedly to re-write data that previously failed. It does not, however, guarantee atomicity for multi-row writes: when a batch of rows is written, some rows may be written successfully while others fail (a retry sketch follows below).
-Schemaless processes row data according to the following rules:
-1. When tag_set contains an ID field, the value of that field is used as the subtable name.
-2. Without an ID field, the MD5 value of `measurement + tag_value1 + tag_value2 + ...` is used as the subtable name.
-3. If the specified super table does not exist, schemaless creates it.
-4. If the specified subtable does not exist, schemaless creates it using the subtable name determined in step 1 or 2.
-5. If a tag column or regular column specified in a data row does not exist, schemaless adds the corresponding column to the super table (columns are only added, never removed).
-6. If the super table has tag columns or regular columns for which a data row specifies no value, those columns are set to NULL for that row.
-7. For BINARY or NCHAR columns, if the length of a value in a data row exceeds the column's type limit, schemaless increases the maximum length the column can store (only increased, never decreased) so the data is stored intact.
-8. If the specified subtable already exists and the tag values specified this time differ from the stored values, the values in the newest data row overwrite the old tag values.
-9. Any error encountered during processing interrupts the write and returns an error code.
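+
+Because writes are idempotent but a batch is not atomic, a caller can simply resubmit the whole batch until it succeeds. Below is a minimal retry sketch under the same assumptions as the example above; the `insert_with_retry` helper is illustrative, not part of the connector API.
+```c
+/* Retry a schemaless batch a few times; re-writing rows that already
+ * succeeded is safe because schemaless writes are idempotent. */
+int insert_with_retry(TAOS *taos, char *lines[], int num_lines, int retries) {
+    int code = 0;
+    for (int attempt = 0; attempt < retries; attempt++) {
+        TAOS_RES *res = taos_schemaless_insert(taos, lines, num_lines,
+                                               TSDB_SML_LINE_PROTOCOL,
+                                               TSDB_SML_TIMESTAMP_NANO_SECONDS);
+        code = taos_errno(res);
+        taos_free_result(res);
+        if (code == 0) return 0; /* whole batch written */
+        /* a syntax error in the input text will never succeed on retry */
+        if (code == TSDB_CODE_TSC_LINE_SYNTAX_ERROR) break;
+    }
+    return code;
+}
+```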
+
+**Error codes**
+If the data of a schemaless write is itself invalid, the application receives the TSDB_CODE_TSC_LINE_SYNTAX_ERROR error, which indicates that the error lies in the written text. Other error codes are the same as in the rest of the system, and the specific cause can be obtained through taos_errstr.
-**Note:** All schemaless processing logic still follows TDengine's underlying limits on data structures; for example, the total length of one data row cannot exceed 16 KB. See the [TAOS SQL boundary limits](https://www.taosdata.com/cn/documentation/taos-sql#limitation) section for the specific constraints.
+**Future upgrade plans** +
当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 Taos Adapter 采用 REST 的方式直接写入无模式数据。 -关于 Schemaless 的字符串编码处理、时区设置等,均会沿用 TAOSC 客户端的设置。 ## Prometheus 直接写入 @@ -183,10 +241,10 @@ use prometheus; select * from apiserver_request_latencies_bucket; ``` -## Telegraf 直接写入(通过 BLM v3) +## Telegraf 直接写入(通过 taosadapter) 安装 Telegraf 请参考[官方文档](https://portal.influxdata.com/downloads/)。 -TDengine 新版本(2.3.0.0+)包含一个 BLM3 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。 配置方法,在 /etc/telegraf/telegraf.conf 增加如下文字,其中 database name 请填写希望在 TDengine 保存 Telegraf 数据的数据库名,TDengine server/cluster host、username和 password 填写 TDengine 实际值: ``` @@ -206,14 +264,14 @@ sudo systemctl start telegraf ``` 即可在 TDengine 中查询 metrics 数据库中 Telegraf 写入的数据。 -BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。 +taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## collectd 直接写入(通过 BLM v3) +## collectd 直接写入(通过 taosadapter) 安装 collectd,请参考[官方文档](https://collectd.org/download.shtml)。 -TDengine 新版本(2.3.0.0+)包含一个 BLM3 独立程序,负责接收包括 collectd 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。 -在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 BLM3 配置的实际值: +在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值: ``` LoadPlugin network @@ -224,15 +282,15 @@ LoadPlugin network ``` sudo systemctl start collectd ``` -BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。 +taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## StatsD 直接写入(通过 BLM v3) +## StatsD 直接写入(通过 taosadapter) 安装 StatsD 请参考[官方文档](https://github.com/statsd/statsd)。 -TDengine 新版本(2.3.0.0+)包含一个 BLM3 独立程序,负责接收包括 StatsD 的多种应用的数据写入。 +TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。 -在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 BLM3 配置的实际值: +在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值: ``` backends 部分添加 "./backends/repeater" repeater 部分添加 { host:'', port: } @@ -247,12 +305,12 @@ port: 8125 } ``` -BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。 +taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。 -## 使用 Bailongma 2.0 接入 Telegraf 数据写入 +## 使用 Bailongma 2.0 接入 Telegraf 数据写入 -*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 BLM v3,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。 +*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosadapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。 [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。 diff --git a/documentation20/cn/08.connector/02.rust/docs.md b/documentation20/cn/08.connector/02.rust/docs.md new file mode 100644 index 0000000000000000000000000000000000000000..01d4087e3acf2eed2dbea207d6d48ff360b5aece --- /dev/null +++ b/documentation20/cn/08.connector/02.rust/docs.md @@ -0,0 +1,110 @@ +# Rust 连接器 + +![Crates.io](https://img.shields.io/crates/v/libtaos) ![Crates.io](https://img.shields.io/crates/d/libtaos) + +> Rust 连接器仍然在快速开发中,版本API变动在所难免,在1.0 之前无法保证其向后兼容,请使用时注意版本及对应的文档。 + +感谢 [@songtianyi](https://github.com/songtianyi) 对 [libtdengine](https://github.com/songtianyi/tdengine-rust-bindings) 的贡献,使Rust社区能够使用Rust 连接[TDengine]. 
[libtaos-rs] 项目旨在为Rust开发者提供官方支持,使用taosc接口及HTTP接口构建兼容API以便于用户切换接口方式。 + +## 依赖 + +- [Rust](https://www.rust-lang.org/learn/get-started) + +默认情况下,[libtaos-rs] 使用 C 接口连接数据库,所以您需要: + +- [TDengine] [客户端](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85) +- `clang`: `bindgen` 使用 `libclangAST` 来生成对应的Rust绑定。 + +## 特性列表 + +- [x] C 接口的Rust绑定 +- [x] 使用 `rest` feature 来启用 RESTful API. +- [x] [r2d2] 连接池支持(feature `r2d2`) +- [ ] 迭代器接口 +- [ ] 流式计算接口 +- [ ] 订阅支持 + +## 构建和测试 + +```sh +cargo build +cargo test +``` + +测试使用默认用户名密码和本地连接。您可以根据具体情况设置环境变量: + +- `TEST_TAOS_IP` +- `TEST_TAOS_PORT` +- `TEST_TAOS_USER` +- `TEST_TAOS_PASS` +- `TEST_TAOS_DB` + +## 使用 + +使用默认的taosc 连接方式,可以在 `Cargo.toml` 中直接添加 `libtaos` 依赖: + +```toml +[dependencies] +libtaos = "v0.3.8" +``` + +添加 feature `r2d2` 来启动连接池: + +```toml +[dependencies] +libtaos = { version = "*", features = ["r2d2"] } +``` + +对于RESTful接口,可使用 `rest` 特性来替代taosc,免去安装TDengine客户端。 + +```toml +[dependencies] +libtaos = { version = "*", features = ["rest"] } +``` + +本项目中提供一个 [示例程序]([examples/demo.rs](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs)) 如下: + +```rust +// ... +#[tokio::main] +async fn main() -> Result<(), Error> { + init(); + let taos = taos_connect()?; + + assert_eq!( + taos.query("drop database if exists demo").await.is_ok(), + true + ); + assert_eq!(taos.query("create database demo").await.is_ok(), true); + assert_eq!(taos.query("use demo").await.is_ok(), true); + assert_eq!( + taos.query("create table m1 (ts timestamp, speed int)") + .await + .is_ok(), + true + ); + + for i in 0..10i32 { + assert_eq!( + taos.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str()) + .await + .is_ok(), + true + ); + } + let rows = taos.query("select * from m1").await?; + + println!("{}", rows.column_meta.into_iter().map(|col| col.name).join(",")); + for row in rows.rows { + println!("{}", row.into_iter().join(",")); + } + Ok(()) +} +``` + +您可以在 [bailongma-rs] - 一个 Rust 编写的 Prometheus 远程存储 API 适配器 - 看到如何在具体应用中使用 Rust 连接器。 + +[libtaos-rs]: https://github.com/taosdata/libtaos-rs +[TDengine]: https://github.com/taosdata/TDengine +[bailongma-rs]: https://github.com/taosdata/bailongma-rs +[r2d2]: https://crates.io/crates/r2d2 \ No newline at end of file diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md index 799cfc14a300d3f4c9fcbf8537f04984ae8e1df4..bc3259365d0b658184318e994ffd31a9e4ffee90 100644 --- a/documentation20/cn/09.connections/docs.md +++ b/documentation20/cn/09.connections/docs.md @@ -3,7 +3,7 @@ ## Grafana -TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。 +TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。关于TDengine插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。 ### 安装Grafana @@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/ ### 配置Grafana -TDengine 的 Grafana 插件在安装包的 /usr/local/taos/connector/grafanaplugin 目录下。 - -以 CentOS 7.2 操作系统为例,将 grafanaplugin 目录拷贝到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。 +TDengine 的 Grafana 插件请从 下载。 ```bash -sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine +GF_VERSION=3.1.1 +wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip ``` -Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 
文件中增加如下行,才能正确使用插件: +以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。 + +```bash +sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/ ``` + +Grafana 7.3+ / 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件: + +```ini [plugins] -enable_alpha = true -allow_loading_unsigned_plugins = taosdata-tdengine-datasource +allow_loading_unsigned_plugins = tdengine-datasource ``` ### 使用 Grafana @@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource * ALIAS BY:可设置当前查询别名。 * GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。 - 按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下: ![img](../images/connections/create_dashboard2.jpg) @@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource #### 导入 Dashboard -在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。 +我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)。 -点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件: +点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,j将id `15146` 填入并加载: ![img](../images/connections/import_dashboard1.jpg) 导入完成之后可看到如下效果: -![img](../images/connections/import_dashboard2.jpg) - +![img](../images/connections/dashboard-15146.png) ## MATLAB diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 64020208abe45d589058414fb123d1616c67f2c7..4ba496d575e0f680c2dbd2820d3dfc062c56cb1c 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -729,17 +729,17 @@ Query OK, 1 row(s) in set (0.001091s) | **Operation** | **Note** | **Applicable Data Types** | | ------------- | ------------------------ | ----------------------------------------- | -| > | larger than | **`timestamp`** and all numeric types | -| < | smaller than | **`timestamp`** and all numeric types | -| >= | larger than or equal to | **`timestamp`** and all numeric types | -| <= | smaller than or equal to | **`timestamp`** and all numeric types | +| > | larger than | all types except bool | +| < | smaller than | all types except bool | +| >= | larger than or equal to | all types except bool | +| <= | smaller than or equal to | all types except bool | | = | equal to | all types | | <> | not equal to | all types | | is [not] null | is null or is not null | all types | -| between and | within a certain range | **`timestamp`** and all numeric types | +| between and | within a certain range | all types except bool | | in | match any value in a set | all types except first column `timestamp` | | like | match a wildcard string | **`binary`** **`nchar`** | -| match/nmatch | filter regex | **regex** | +| match/nmatch | filter regex | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. 
like 算子使用通配符字符串进行匹配检查。 @@ -766,15 +766,10 @@ Query OK, 1 row(s) in set (0.001091s) **使用限制** - 只能针对表名(即 tbname 筛选)和标签的名称和binary类型标签值 进行正则表达式过滤,不支持针对普通列使用正则表达式过滤。 - - 只能在 WHERE 子句中作为过滤条件存在。 + 只能针对表名(即 tbname 筛选)、binary/nchar类型标签值进行正则表达式过滤,不支持普通列的过滤。 正则匹配字符串长度不能超过 128 字节。可以通过参数 *maxRegexStringLen* 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启才能生效。 - **嵌套查询支持** - - 可以在内层查询和外层查询中使用。 ### JOIN 子句 diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index 7483c972eebe26d0b010724ea699cd94906f382c..eb5f20e708bb4bb592a1ab2d535fcf261457b989 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -185,23 +185,23 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端 | TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 | | TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 | | TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 | -| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 BLM3 启动参数设置变化(2.3.0.1+以上版本)。 | -| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 BLM3 启动参数设置变化(2.3.0.1+以上版本)。 | +| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 | +| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 | | TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | | | UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 | | UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 | ## 20. go 语言编写组件编译失败怎样解决? -新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 BLM3 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 -使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 blm3 仓库代码后再编译。 +新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosadapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。 +使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosadapter 仓库代码后再编译。 -目前编译方式默认自动编译 blm3。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: +目前编译方式默认自动编译 taosadapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: ```sh go env -w GO111MODULE=on go env -w GOPROXY=https://goproxy.cn,direct ``` -如果希望继续使用之前的内置 httpd,可以关闭 blm3 编译,使用 +如果希望继续使用之前的内置 httpd,可以关闭 taosadapter 编译,使用 `cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md index 4bdcd52d62f8c3a95bc91261b77242e5263a8f23..04765602dab18fbacf7d92d44ca324db660c0ac4 100644 --- a/documentation20/cn/14.devops/01.telegraf/docs.md +++ b/documentation20/cn/14.devops/01.telegraf/docs.md @@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ## 数据链路设置 -### 复制 TDengine 插件到 grafana 插件目录 -``` -1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine -2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine -3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini -4. sudo systemctl restart grafana-server.service +### 下载 TDengine 插件到 grafana 插件目录 + +```bash +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip +2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine +4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini +5. 
sudo systemctl restart grafana-server.service ``` ### 修改 /etc/telegraf/telegraf.conf @@ -61,7 +63,7 @@ sudo systemctl start telegraf 使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。 点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。 -点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件再导入。之后可以看到如下界面的仪表盘: +点击左侧加号图标并选择 Import,从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘: ![IT-DevOps-Solutions-telegraf-dashboard.png](../../images/IT-DevOps-Solutions-telegraf-dashboard.png) diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md index 2a031d63e55ed7888332757170b781beae787ff7..5860e70ceafafadc21c5772c96515e0925897e3a 100644 --- a/documentation20/cn/14.devops/02.collectd/docs.md +++ b/documentation20/cn/14.devops/02.collectd/docs.md @@ -30,15 +30,17 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如 ## 数据链路设置 ### 复制 TDengine 插件到 grafana 插件目录 -``` -1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine -2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine -3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini -4. sudo systemctl restart grafana-server.service + +```bash +1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip +2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/ +3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine +4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini +5. sudo systemctl restart grafana-server.service ``` ### 配置 collectd -在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 BLM3 配置的实际值: +在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值: ``` LoadPlugin network @@ -49,7 +51,7 @@ sudo systemctl start collectd ``` ### 配置 StatsD -在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 BLM3 配置的实际值: +在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 Taos Adapter 配置的实际值: ``` backends 部分添加 "./backends/repeater" repeater 部分添加 { host:'', port: } @@ -62,13 +64,13 @@ repeater 部分添加 { host:'', port: Note that the rust connector is under active development and the APIs will changes a lot between versions. But we promise to ensure backward compatibility after version 1.0 . + +Thanks [@songtianyi](https://github.com/songtianyi) for [libtdengine](https://github.com/songtianyi/tdengine-rust-bindings) - a rust bindings project for [TDengine]. It's an new design for [TDengine] rust client based on C interface or the REST API. It'll will provide Rust-like APIs and all rust things (like async/stream/iterators and others). + +## Dependencies + +- [Rust](https://www.rust-lang.org/learn/get-started) of course. + +if you use the default features, it'll depend on: + +- [TDengine] Client library and headers. +- clang because bindgen will requires the clang AST library. + +## Fetures + +In-design features: + +- [x] API for both C interface +- [x] REST API support by feature `rest`. 
+- [x] [r2d2] Pool support by feature `r2d2` +- [ ] Iterators for fields fetching +- [ ] Stream support +- [ ] Subscribe support + +## Build and test + +```sh +cargo build +cargo test +``` + +`test` will use default TDengine user and password on localhost (TDengine default). + +Set variables if it's not default: + +- `TEST_TAOS_IP` +- `TEST_TAOS_PORT` +- `TEST_TAOS_USER` +- `TEST_TAOS_PASS` +- `TEST_TAOS_DB` + +## Usage + +For default C-based client API, set in Cargo.toml + +```toml +[dependencies] +libtaos = "v0.3.8" +``` + +For r2d2 support: + +```toml +[dependencies] +libtaos = { version = "*", features = ["r2d2"] } +``` + +For REST client: + +```toml +[dependencies] +libtaos = { version = "*", features = ["rest"] } +``` + +There's a [demo app]([examples/demo.rs](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs)) in examples directory, looks like this: + +```rust +// ... +#[tokio::main] +async fn main() -> Result<(), Error> { + init(); + let taos = taos_connect()?; + + assert_eq!( + taos.query("drop database if exists demo").await.is_ok(), + true + ); + assert_eq!(taos.query("create database demo").await.is_ok(), true); + assert_eq!(taos.query("use demo").await.is_ok(), true); + assert_eq!( + taos.query("create table m1 (ts timestamp, speed int)") + .await + .is_ok(), + true + ); + + for i in 0..10i32 { + assert_eq!( + taos.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str()) + .await + .is_ok(), + true + ); + } + let rows = taos.query("select * from m1").await?; + + println!("{}", rows.column_meta.into_iter().map(|col| col.name).join(",")); + for row in rows.rows { + println!("{}", row.into_iter().join(",")); + } + Ok(()) +} +``` + +You can check out the experimental [bailongma-rs](https://github.com/taosdata/bailongma-rs) - a TDengine adapters for prometheus written with Rust - as a more productive code example. + +[libtaos-rs]: https://github.com/taosdata/libtaos-rs +[TDengine]: https://github.com/taosdata/TDengine +[bailongma-rs]: https://github.com/taosdata/bailongma-rs +[r2d2]: https://crates.io/crates/r2d2 \ No newline at end of file diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md index f1bbf0ff639719c7609f4a04685adf9c16a4e623..b56458d351d23a2b61f88cfdf7dc64dc8043a295 100644 --- a/documentation20/en/09.connections/docs.md +++ b/documentation20/en/09.connections/docs.md @@ -12,12 +12,17 @@ https://grafana.com/grafana/download. ### Configure Grafana -TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory. +Download grafana plugin from . + +```bash +GF_VERSION=3.1.1 +wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip +``` Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana. ```bash -sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine +sudo unzip tdengine-datasource-$GF_VERSION.zip /var/lib/grafana/plugins/ ``` ### Use Grafana @@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp #### Import Dashboard -A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`. 
+We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)。 -Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file: +Click the `Import` button on the left panel and load the grafana id: ![img](../images/connections/import_dashboard1.jpg) You can see as follows after Dashboard imported. -![img](../images/connections/import_dashboard2.jpg) +![img](../images/connections/dashboard-15146.png) ## MATLAB diff --git a/documentation20/en/images/connections/dashboard-15146.png b/documentation20/en/images/connections/dashboard-15146.png new file mode 100644 index 0000000000000000000000000000000000000000..3eb240ad8ad648953e32f27e674e2a9171ed9af8 Binary files /dev/null and b/documentation20/en/images/connections/dashboard-15146.png differ diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 48f0bee6b34496603d67f74938857d7bb94627f2..e42212ff0f55420dfa5f23638a69439be795e43a 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -203,6 +203,9 @@ keepColumnName 1 # database name must be specified in restful interface if the following parameter is set, off by default # httpDbNameMandatory 1 +# http keep alive, default is 30 seconds +# httpKeepAlive 30000 + # The following parameter is used to limit the maximum number of lines in log files. # max number of lines per log filters # numOfLogLines 10000000 diff --git a/packaging/check_package.sh b/packaging/check_package.sh index edc98da65e5574b91efbce16f4df0fd042b18c13..0870e8c8eccc1a745ae5b081e2726ed8d809cf2b 100755 --- a/packaging/check_package.sh +++ b/packaging/check_package.sh @@ -128,12 +128,12 @@ function check_link() { function check_main_path() { #check install main dir and all sub dir main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d") - for i in ${main_dir[@]};do + for i in "${main_dir[@]}";do check_file ${install_main_dir} $i done if [ "$verMode" == "cluster" ]; then nginx_main_dir=("admin" "conf" "html" "sbin" "logs") - for i in ${nginx_main_dir[@]};do + for i in "${nginx_main_dir[@]}";do check_file ${nginx_dir} $i done fi @@ -142,12 +142,12 @@ function check_main_path() { function check_bin_path() { # check install bin dir and all sub dir - bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh") - for i in ${bin_dir[@]};do + bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh") + for i in "${bin_dir[@]}";do check_file ${sbin_dir} $i done - lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core") - for i in ${lbin_dir[@]};do + lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core") + for i in "${lbin_dir[@]}";do check_link ${bin_link_dir}/$i done if [ "$verMode" == "cluster" ]; then @@ -171,16 +171,17 @@ function check_lib_path() { function check_header_path() { # check all header header_dir=("taos.h" "taoserror.h") - for i in ${header_dir[@]};do + for i in "${header_dir[@]}";do check_link ${inc_link_dir}/$i done echo -e "Check bin path:\033[32mOK\033[0m!" } -function check_blm3_config_dir() { +function check_taosadapter_config_dir() { # check all config - check_file ${cfg_install_dir} blm3.toml - check_file ${install_main_dir}/cfg blm.toml.org + check_file ${cfg_install_dir} taosadapter.toml + check_file ${cfg_install_dir} taosadapter.service + check_file ${install_main_dir}/cfg taosadapter.toml.org echo -e "Check conf path:\033[32mOK\033[0m!" 
} @@ -221,7 +222,7 @@ function test_TDengine() { check_lib_path check_header_path check_config_dir - check_blm3_config_dir + check_taosadapter_config_dir check_log_path check_data_path result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:` diff --git a/packaging/deb/DEBIAN/control b/packaging/deb/DEBIAN/control index c01640d7e9adb4f7f8d6eb29f06008480dc8eee4..fd3f81ba082d11f6ff3979382a63597b5806fa1f 100644 --- a/packaging/deb/DEBIAN/control +++ b/packaging/deb/DEBIAN/control @@ -11,4 +11,3 @@ Maintainer: support@taosdata.com Provides: taosdata Homepage: http://taosdata.com Description: Big Data Platform Designed and Optimized for IoT. - diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst index 55218b471669887bd0d4066bb9ef91bf1f195031..4b8b72e9abd9e12d9f669cf5658be2468ebab40b 100644 --- a/packaging/deb/DEBIAN/preinst +++ b/packaging/deb/DEBIAN/preinst @@ -28,8 +28,12 @@ if [ -f "${install_main_dir}/taos.cfg" ]; then ${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : fi -if [ -f "${install_main_dir}/blm.toml" ]; then - ${csudo} rm -f ${install_main_dir}/cfg/blm.toml || : +if [ -f "${install_main_dir}/taosadapter.toml" ]; then + ${csudo} rm -f ${install_main_dir}/cfg/taosadapter.toml || : +fi + +if [ -f "${install_main_dir}/taosadapter.service" ]; then + ${csudo} rm -f ${install_main_dir}/cfg/taosadapter.service || : fi # there can not libtaos.so*, otherwise ln -s error diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index e2043ba54cef0db4f4fd729f2c2285c342b6b109..235834a747e82886eef6c4540877307aa4dd3996 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -25,7 +25,7 @@ else # Remove all links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/blm3 || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 2c18cec497c0a741c96f13afb06794e26e8eaf1c..f753668b3b1a83d15c126ae6b0d94c06e97c80aa 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -44,8 +44,11 @@ mkdir -p ${pkg_dir}${install_home_path}/init.d mkdir -p ${pkg_dir}${install_home_path}/script cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg -if [ -f "${compile_dir}/test/cfg/blm.toml" ]; then - cp ${compile_dir}/test/cfg/blm.toml ${pkg_dir}${install_home_path}/cfg +if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then + cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg +fi +if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then + cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg ||: fi cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d @@ -59,8 +62,8 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin -if [ -f "${compile_dir}/build/bin/blm3" ]; then - cp ${compile_dir}/build/bin/blm3 ${pkg_dir}${install_home_path}/bin ||: +if [ -f "${compile_dir}/build/bin/taosadapter" ]; then + cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||: fi cp ${compile_dir}/build/bin/taos 
${pkg_dir}${install_home_path}/bin @@ -68,19 +71,24 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples -if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then - cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin -else - echo "grafanaplugin bundled directory not found!" - exit 1 -fi cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||: +install_user_local_path="/usr/local" + +if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then + mkdir -p ${pkg_dir}${install_user_local_path}/lib + cp ${compile_dir}/build/lib/libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/ + ln -sf libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/libavro.so.23 + ln -sf libavro.so.23 ${pkg_dir}${install_user_local_path}/lib/libavro.so +fi +if [ -f ${compile_dir}/build/lib/libavro.a ]; then + cp ${compile_dir}/build/lib/libavro.a ${pkg_dir}${install_user_local_path}/lib/ +fi + if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then - install_user_local_path="/usr/local" mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3} cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/ if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then @@ -120,6 +128,10 @@ chmod 755 ${pkg_dir}/DEBIAN/* debver="Version: "$tdengine_ver sed -i "2c$debver" ${pkg_dir}/DEBIAN/control +if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then + sed -i.bak "s/#Depends: no/Depends: libjansson4, libsnappy1v5/g" ${pkg_dir}/DEBIAN/control +fi + #get taos version, then set deb name @@ -151,4 +163,3 @@ cp ${pkg_dir}/*.deb ${output_dir} # clean tmep dir rm -rf ${pkg_dir} - diff --git a/packaging/deb/taosd b/packaging/deb/taosd index a14e61ac8cfb67b970ee89a2fd4cda9d7937b23f..5002607da20b621ca69a8a2a25e713879d0308af 100644 --- a/packaging/deb/taosd +++ b/packaging/deb/taosd @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash # # Modified from original source: Elastic Search # https://github.com/elasticsearch/elasticsearch @@ -25,7 +25,7 @@ GROUP="root" DAEMON="/usr/local/taos/bin/taosd" DAEMON_OPTS="" -HTTPD_NAME="blm3" +HTTPD_NAME="taosadapter" DAEMON_HTTPD_NAME=$HTTPD_NAME DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME" diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh index e4d3cda7f29fea96cabfe48f5b10ab668a085ea8..9f60b840d68577b751314e7ddecc9da98c20f8d6 100755 --- a/packaging/docker/dockerManifest.sh +++ b/packaging/docker/dockerManifest.sh @@ -45,24 +45,32 @@ echo "version=${version}" #docker manifest rm tdengine/tdengine:${version} if [ "$verType" == "beta" ]; then docker manifest inspect tdengine/tdengine-beta:latest + docker manifest inspect tdengine/tdengine-beta:${version} + docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:latest 
tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest + sleep 30 + docker manifest rm tdengine/tdengine-beta:${version} docker manifest rm tdengine/tdengine-beta:latest docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version} docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password - docker manifest push tdengine/tdengine-beta:latest docker manifest push tdengine/tdengine-beta:${version} - + docker manifest push tdengine/tdengine-beta:latest elif [ "$verType" == "stable" ]; then docker manifest inspect tdengine/tdengine:latest + docker manifest inspect tdengine/tdengine:${version} + docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version} docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest + sleep 30 docker manifest rm tdengine/tdengine:latest + docker manifest rm tdengine/tdengine:${version} + docker manifest inspect tdengine/tdengine:latest + docker manifest inspect tdengine/tdengine:${version} docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version} docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest docker login -u tdengine -p ${passWord} #replace the docker registry username and password - docker manifest push tdengine/tdengine:latest docker manifest push tdengine/tdengine:${version} - + docker manifest push tdengine/tdengine:latest else echo "unknow verType, nor stabel or beta" exit 1 diff --git a/packaging/release.sh b/packaging/release.sh index 705103a87a35a73b2a91079707785279416644cd..b9fe25ec08e8dcd1170867fa20f4a4fe5a1ef2d1 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -151,7 +151,7 @@ function vercomp () { } # 1. check version information -if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then +if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then echo "please enter correct version" exit 0 fi @@ -213,7 +213,7 @@ else exit 1 fi -make -j8 +make -j8 && ${csudo} make install cd ${curr_dir} diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh index 4cc7daf1a4cd15d06db084faf23dd4fcb15a955d..42ceeb791b6154f7d22a477bf3b3c3b8c726869c 100755 --- a/packaging/rpm/makerpm.sh +++ b/packaging/rpm/makerpm.sh @@ -32,20 +32,20 @@ if command -v sudo > /dev/null; then fi function cp_rpm_package() { -local cur_dir -cd $1 -cur_dir=$(pwd) - -for dirlist in $(ls ${cur_dir}); do - if test -d ${dirlist}; then - cd ${dirlist} - cp_rpm_package ${cur_dir}/${dirlist} - cd .. 
- fi - if test -e ${dirlist}; then - cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm - fi -done + local cur_dir + cd $1 + cur_dir=$(pwd) + + for dirlist in "$(ls ${cur_dir})"; do + if test -d ${dirlist}; then + cd ${dirlist} + cp_rpm_package ${cur_dir}/${dirlist} + cd .. + fi + if test -e ${dirlist}; then + cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm + fi + done } if [ -d ${pkg_dir} ]; then @@ -56,6 +56,10 @@ cd ${pkg_dir} ${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS +if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then + sed -i.bak 's/#Requires:/Requires: jansson snappy/g' ${spec_file} +fi + ${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file} # copy rpm package to output_dir, and modify package name, then clean temp dir diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 19fe23d194be2266bcb68034e3c4fd90d9824f3d..f7b8462dbedc74a270a8560bb51a853e292cff27 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -54,8 +54,11 @@ mkdir -p %{buildroot}%{homepath}/init.d mkdir -p %{buildroot}%{homepath}/script cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg -if [ -f %{_compiledir}/test/cfg/blm.toml ]; then - cp %{_compiledir}/test/cfg/blm.toml %{buildroot}%{homepath}/cfg +if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then + cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg +fi +if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then + cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg fi cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script @@ -65,26 +68,28 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin -if [ -f %{_compiledir}/build/bin/blm3 ]; then - cp %{_compiledir}/build/bin/blm3 %{buildroot}%{homepath}/bin ||: +if [ -f %{_compiledir}/build/bin/taosadapter ]; then + cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||: fi cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include -if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then - cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin -else - echo grafanaplugin bundled directory not found! 
- exit 1 -fi cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||: cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples +if [ -f %{_compiledir}/build/lib/libavro.so.23.0.0 ]; then + cp %{_compiledir}/build/lib/libavro.so.23.0.0 %{buildroot}%{homepath}/driver + ln -sf libavro.so.23.0.0 %{buildroot}%{homepath}/driver/libavro.so.23 + ln -sf libavro.so.23 %{buildroot}%{homepath}/driver/libavro.so +fi +if [ -f %{_compiledir}/build/lib/libavro.a ]; then + cp %{_compiledir}/build/lib/libavro.a %{buildroot}%{homepath}/driver +fi if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then mkdir -p %{buildroot}%{userlocalpath}/bin @@ -151,14 +156,14 @@ if pidof taosd &> /dev/null; then echo "Stop taosd service success!" sleep 1 fi -# if taos.cfg already softlink, remove it +# if taos.cfg already exist, remove it if [ -f %{cfg_install_dir}/taos.cfg ]; then - ${csudo} rm -f %{homepath}/cfg/taos.cfg || : + ${csudo} rm -f %{cfg_install_dir}/cfg/taos.cfg || : fi -# if blm.toml already softlink, remove it -if [ -f %{cfg_install_dir}/blm.toml ]; then - ${csudo} rm -f %{homepath}/cfg/blm.toml || : +# if taosadapter.toml already exist, remove it +if [ -f %{cfg_install_dir}/taosadapter.toml ]; then + ${csudo} rm -f %{cfg_install_dir}/cfg/taosadapter.toml || : fi # there can not libtaos.so*, otherwise ln -s error @@ -199,7 +204,7 @@ if [ $1 -eq 0 ];then # Remove all links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/blm3 || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${cfg_link_dir}/* || : diff --git a/packaging/tools/check_os.sh b/packaging/tools/check_os.sh index 92522f7b82e166c1d6ec365619869ad68969155c..cc8c6e0e9366232deb9013db62b29afebd179135 100755 --- a/packaging/tools/check_os.sh +++ b/packaging/tools/check_os.sh @@ -1,4 +1,4 @@ -# /bin/bash +#!/bin/bash # CSI=$(echo -e "\033[") CRED="${CSI}1;31m" diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 2d3ed2e0f8f97c4604471659415a691d1b704a60..61fcd3e51982dab6a72245fe0ffb9de5ac51a664 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -185,7 +185,7 @@ function install_bin() { # Remove links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/blm3 || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -197,7 +197,7 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/blm3 ] && ${csudo} ln -s ${install_main_dir}/bin/blm3 ${bin_link_dir}/blm3 || : + [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x 
${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || : @@ -303,7 +303,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -358,7 +358,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 @@ -447,18 +447,18 @@ function local_fqdn_check() { fi } -function install_blm3_config() { - if [ ! -f "${cfg_install_dir}/blm.toml" ]; then +function install_taosadapter_config() { + if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${script_dir}/cfg/blm.toml ] && ${csudo} cp ${script_dir}/cfg/blm.toml ${cfg_install_dir} - [ -f ${cfg_install_dir}/blm.toml ] && ${csudo} chmod 644 ${cfg_install_dir}/blm.toml + [ -f ${script_dir}/cfg/taosadapter.toml ] && ${csudo} cp ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/taosadapter.toml ] && ${csudo} chmod 644 ${cfg_install_dir}/taosadapter.toml fi - [ -f ${script_dir}/cfg/blm.toml ] && - ${csudo} cp -f ${script_dir}/cfg/blm.toml ${install_main_dir}/cfg/blm.toml.org + [ -f ${script_dir}/cfg/taosadapter.toml ] && + ${csudo} cp -f ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir}/taosadapter.toml.new - [ -f ${cfg_install_dir}/blm.toml ] && - ${csudo} ln -s ${cfg_install_dir}/blm.toml ${install_main_dir}/cfg/blm.toml + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo} ln -s ${cfg_install_dir}/taosadapter.toml ${install_main_dir}/cfg/taosadapter.toml [ ! -z $1 ] && return 0 || : # only install client @@ -473,7 +473,7 @@ function install_config() { ${csudo} chmod 644 ${cfg_install_dir}/* fi - ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org + ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${cfg_install_dir}/taos.cfg.new ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg [ ! 
-z $1 ] && return 0 || : # only install client @@ -679,8 +679,8 @@ function install_service_on_systemd() { taosd_service_config="${service_config_dir}/taosd.service" ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" @@ -756,6 +756,11 @@ function install_service_on_systemd() { fi } +function install_taosadapter_service() { + [ -f ${script_dir}/cfg/taosadapter.service ] &&\ + ${csudo} cp ${script_dir}/cfg/taosadapter.service ${service_config_dir}/ +} + function install_service() { if ((${service_mod}==0)); then install_service_on_systemd @@ -878,8 +883,9 @@ function update_TDengine() { if [ -z $1 ]; then install_bin install_service + install_taosadapter_service install_config - install_blm3_config + install_taosadapter_config openresty_work=false if [ "$verMode" == "cluster" ]; then @@ -959,6 +965,7 @@ function install_TDengine() { # For installing new install_bin install_service + install_taosadapter_service openresty_work=false if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 05eb09d8f3a8b5237c36714e964530b877e332de..0e0ee7ba31f4715b2c5585dd040727d604aa90b1 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -342,7 +342,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh index 527f9a231e5a97fa086ef655cd420abc61677fcf..e5675b858066148df07508ad2438b0f00d7ce7bf 100755 --- a/packaging/tools/install_pro.sh +++ b/packaging/tools/install_pro.sh @@ -278,7 +278,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -305,7 +305,7 @@ function set_hostname() { echo "set hostname fail!" 
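(Several hunks in these installer scripts change `for s in ${arr[@]}` to the quoted form `for s in "${arr[@]}"`. A self-contained illustration of why the quoting matters; the sample values are hypothetical:)
```
# Unquoted ${arr[@]} re-splits each element on IFS, so an element holding a
# space becomes several loop iterations; quoting yields exactly one
# iteration per element, which the IP-matching loops rely on.
arr=("192.168.1.10" "fe80::1 scope link")
for s in ${arr[@]};   do echo "unquoted: <$s>"; done   # 4 iterations
for s in "${arr[@]}"; do echo "quoted:   <$s>"; done   # 2 iterations
```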
return fi - + #ubuntu/centos /etc/hostname if [[ -e /etc/hostname ]]; then ${csudo} echo $newHostname > /etc/hostname ||: @@ -330,7 +330,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh index 52e08cb6b0d00b25686b87e2f066401e0388d4ce..ef5fb8c05a4a98a55918ee217125bd0f0a09b955 100755 --- a/packaging/tools/install_tq.sh +++ b/packaging/tools/install_tq.sh @@ -287,7 +287,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -342,7 +342,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 7fbdbab1c798af572fc67cf79f27812ea64d3bae..8309fa516c4ffdcd9e5a17056304427543dad0a9 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -114,8 +114,8 @@ if [ "$osType" != "Darwin" ]; then fi fi -function kill_blm3() { - pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}') +function kill_taosadapter() { + pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -156,7 +156,7 @@ function install_bin() { # Remove links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/blm3 || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : @@ -176,7 +176,7 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || : [ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || : - [ -x ${install_main_dir}/bin/blm3 ] && ${csudo} ln -s ${install_main_dir}/bin/blm3 ${bin_link_dir}/blm3 || : + [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || : @@ -191,7 +191,7 @@ function install_bin() { #Make link [ -x ${install_main_dir}/bin/taos ] || [ -x ${install_main_2_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || ${csudo} ln -s ${install_main_2_dir}/bin/taos || : [ -x ${install_main_dir}/bin/taosd ] || [ -x ${install_main_2_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || ${csudo} ln -s ${install_main_2_dir}/bin/taosd || : - [ -x ${install_main_dir}/bin/blm3 ] || [ -x ${install_main_2_dir}/bin/blm3 ] && ${csudo} ln -s ${install_main_dir}/bin/blm3 ${bin_link_dir}/blm3 || ${csudo} ln -s ${install_main_2_dir}/bin/blm3 || : + [ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter 
${bin_link_dir}/taosadapter || ${csudo} ln -s ${install_main_2_dir}/bin/taosadapter || : [ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || : [ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || : fi @@ -212,7 +212,8 @@ function install_jemalloc() { fi if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then /usr/bin/install -c -d /usr/local/include/jemalloc - /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc + /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\ + /usr/local/include/jemalloc fi if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then /usr/bin/install -c -d /usr/local/lib @@ -225,23 +226,47 @@ function install_jemalloc() { /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then /usr/bin/install -c -d /usr/local/lib/pkgconfig - /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig + /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\ + /usr/local/lib/pkgconfig + fi + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" fi fi if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then /usr/bin/install -c -d /usr/local/share/doc/jemalloc - /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc + /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\ + /usr/local/share/doc/jemalloc fi if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then /usr/bin/install -c -d /usr/local/share/man/man3 - /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3 + /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\ + /usr/local/share/man/man3 fi - if [ -d /etc/ld.so.conf.d ]; then - echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf - ${csudo} ldconfig - else - echo "/etc/ld.so.conf.d not found!" + fi +} + +function install_avro() { + if [ "$osType" != "Darwin" ]; then + if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then + /usr/bin/install -c -d /usr/local/$1 + /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1 + ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23 + ln -sf libavro.so.23 /usr/local/$1/libavro.so + /usr/bin/install -c -d /usr/local/$1 + [ -f ${binary_dir}/build/$1/libavro.a ] && + /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1 + + if [ -d /etc/ld.so.conf.d ]; then + echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf + ${csudo} ldconfig + else + echo "/etc/ld.so.conf.d not found!" 
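(The install_avro function introduced above follows the usual pattern for hand-installing a versioned shared library. A condensed sketch of that pattern, with the path and version taken from the hunk and sudo elided; treat it as an illustration rather than the function itself:)
```
# Install the real file once, then build the soname chain as relative
# symlinks: the .so.23 link is what the dynamic loader resolves at run time,
# the bare .so is what `ld -lavro` resolves at link time. ldconfig then
# refreshes the loader cache after the directory is registered.
/usr/bin/install -c -m 755 libavro.so.23.0.0 /usr/local/lib/
ln -sf libavro.so.23.0.0 /usr/local/lib/libavro.so.23
ln -sf libavro.so.23     /usr/local/lib/libavro.so
echo "/usr/local/lib" | tee /etc/ld.so.conf.d/libavro.conf
ldconfig
```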
+ fi fi fi } @@ -292,6 +317,8 @@ function install_lib() { fi install_jemalloc + install_avro lib + install_avro lib64 if [ "$osType" != "Darwin" ]; then ${csudo} ldconfig @@ -324,39 +351,33 @@ function install_config() { [ -f ${script_dir}/../cfg/taos.cfg ] && ${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir} ${csudo} chmod 644 ${cfg_install_dir}/taos.cfg - ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg/taos.cfg + ${csudo} cp -f ${script_dir}/../cfg/taos.cfg \ + ${cfg_install_dir}/taos.cfg.${verNumber} + ${csudo} ln -s ${cfg_install_dir}/taos.cfg \ + ${install_main_dir}/cfg/taos.cfg else - if [ "$osType" != "Darwin" ]; then - ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org - else - ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org\ - || ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_2_dir}/cfg/taos.cfg.org - fi + ${csudo} cp -f ${script_dir}/../cfg/taos.cfg \ + ${cfg_install_dir}/taos.cfg.${verNumber} fi } -function install_blm3_config() { - if [ ! -f "${cfg_install_dir}/blm.toml" ]; then +function install_taosadapter_config() { + if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${binary_dir}/test/cfg/blm.toml ] && - ${csudo} cp ${binary_dir}/test/cfg/blm.toml ${cfg_install_dir} - [ -f ${cfg_install_dir}/blm.toml ] && - ${csudo} chmod 644 ${cfg_install_dir}/blm.toml - [ -f ${binary_dir}/test/cfg/blm.toml ] && - ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml ${install_main_dir}/cfg/blm.toml.org - [ -f ${cfg_install_dir}/blm.toml ] && - ${csudo} ln -s ${cfg_install_dir}/blm.toml ${install_main_dir}/cfg/blm.toml + [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && + ${csudo} cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo} chmod 644 ${cfg_install_dir}/taosadapter.toml + [ -f ${binary_dir}/test/cfg/taosadapter.toml ] && + ${csudo} cp -f ${binary_dir}/test/cfg/taosadapter.toml \ + ${cfg_install_dir}/taosadapter.toml.${verNumber} + [ -f ${cfg_install_dir}/taosadapter.toml ] && \ + ${csudo} ln -s ${cfg_install_dir}/taosadapter.toml \ + ${install_main_dir}/cfg/taosadapter.toml else - if [ -f "${binary_dir}/test/cfg/blm.toml" ]; then - if [ "$osType" != "Darwin" ]; then - ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml \ - ${install_main_dir}/cfg/blm.toml.org - else - ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml ${install_main_dir}/cfg/blm.toml.org \ - || ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml \ - ${install_main_2_dir}/cfg/blm.toml.org - fi + if [ -f "${binary_dir}/test/cfg/taosadapter.toml" ]; then + ${csudo} cp -f ${binary_dir}/test/cfg/taosadapter.toml \ + ${cfg_install_dir}/taosadapter.toml.${verNumber} fi fi } @@ -381,11 +402,6 @@ function install_data() { } function install_connector() { - if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then - ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" 
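(The service hunks that follow add taosadapter.service to both the After= and Wants= lines of the generated taosd unit. A minimal sketch of the resulting unit, written as one heredoc rather than the script's repeated `echo >>` appends; the ExecStart path is illustrative:)
```
# Wants= pulls taosadapter.service into taosd's start-up transaction without
# making it a hard dependency; After= then orders the two, so taosadapter is
# launched before taosd once both are queued.
cat > /etc/systemd/system/taosd.service <<'EOF'
[Unit]
Description=TDengine server service
After=network-online.target taosadapter.service
Wants=network-online.target taosadapter.service

[Service]
Type=simple
ExecStart=/usr/local/taos/bin/taosd
EOF
```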
- fi if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then ${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector else @@ -481,8 +497,8 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" @@ -503,6 +519,16 @@ function install_service_on_systemd() { ${csudo} systemctl enable taosd } +function install_taosadapter_service() { + if ((${service_mod}==0)); then + [ -f ${binary_dir}/test/cfg/taosadapter.service ] &&\ + ${csudo} cp ${binary_dir}/test/cfg/taosadapter.service\ + ${service_config_dir}/ || : + else + kill_taosadapter + fi +} + function install_service() { if ((${service_mod}==0)); then install_service_on_systemd @@ -510,7 +536,6 @@ function install_service() { install_service_on_sysvinit else # must manual stop taosd - kill_blm3 kill_taosd fi } @@ -526,7 +551,7 @@ function update_TDengine() { elif ((${service_mod}==1)); then ${csudo} service taosd stop || : else - kill_blm3 + kill_taosadapter kill_taosd fi sleep 1 @@ -544,10 +569,11 @@ function update_TDengine() { if [ "$osType" != "Darwin" ]; then install_service + install_taosadapter_service fi install_config - install_blm3_config + install_taosadapter_config if [ "$osType" != "Darwin" ]; then echo @@ -555,7 +581,7 @@ function update_TDengine() { echo echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - echo -e "${GREEN_DARK}To configure blm3 (if has) ${NC}: edit /etc/taos/blm.toml" + echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit /etc/taos/taosadapter.toml" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" elif ((${service_mod}==1)); then @@ -598,10 +624,11 @@ function install_TDengine() { if [ "$osType" != "Darwin" ]; then install_service + install_taosadapter_service fi install_config - install_blm3_config + install_taosadapter_config if [ "$osType" != "Darwin" ]; then # Ask if to start the service @@ -609,7 +636,7 @@ function install_TDengine() { echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" echo echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" - echo -e "${GREEN_DARK}To configure blm (if has) ${NC}: edit /etc/taos/blm.toml" + echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit /etc/taos/taosadapter.toml" if ((${service_mod}==0)); then echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" elif ((${service_mod}==1)); then diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh index d26f617e421406364ce4d34c4baf5c55b904a2b5..39a35e384fffdd4f319e72fbeb819fe08f7871b8 100755 --- a/packaging/tools/makeclient.sh +++ b/packaging/tools/makeclient.sh @@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar 
${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh index 89591cac234b190f55d144ccf98cb2d5c70a7936..19e24b3dafb7f1f95832e637e181449e4c381faf 100755 --- a/packaging/tools/makeclient_power.sh +++ b/packaging/tools/makeclient_power.sh @@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh index 599c91fbf082955887c677b750aa12f946c0890b..4a0b033d30e6478f37a62f9cc896aee0903d39c9 100755 --- a/packaging/tools/makeclient_pro.sh +++ b/packaging/tools/makeclient_pro.sh @@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh index 03d9b13059daadfdc7207c78b6f89cae321f25ac..1cc7003661a7491b1df625916dd289de32434ee9 100755 --- a/packaging/tools/makeclient_tq.sh +++ b/packaging/tools/makeclient_tq.sh @@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then if [ "$osType" != "Darwin" ]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: fi - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!" 
- fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index f0c25208529768fb387262a668381a57e34f51ac..05b49ff6a9599c6050d2ccad778f63d285981420 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -35,12 +35,12 @@ fi if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/taosd strip ${build_dir}/bin/taos - # lite version doesn't include blm3, which will lead to no restful interface + # lite version doesn't include taosadapter, which will lead to no restful interface bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh" else bin_files="${build_dir}/bin/taosd \ ${build_dir}/bin/taos \ - ${build_dir}/bin/blm3 \ + ${build_dir}/bin/taosadapter \ ${build_dir}/bin/taosdump \ ${build_dir}/bin/taosdemo \ ${build_dir}/bin/tarbitrator\ @@ -78,7 +78,7 @@ mkdir -p ${install_dir} mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg -[ -f ${cfg_dir}/blm.toml ] && cp ${cfg_dir}/blm.toml ${install_dir}/cfg/blm.toml +[ -f ${cfg_dir}/taosadapter.toml ] && cp ${cfg_dir}/taosadapter.toml ${install_dir}/cfg/taosadapter.toml mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || : mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb @@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector" mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh index dbb7e6887fa1b0f96ea68f1c880ee77ced0858bd..65200ddd047358f92f8e3a612c08eedb60053311 100755 --- a/packaging/tools/makepkg_power.sh +++ b/packaging/tools/makepkg_power.sh @@ -81,7 +81,7 @@ else # bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh" cp ${build_dir}/bin/taos ${install_dir}/bin/power cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd - cp ${build_dir}/bin/blm3 ${install_dir}/bin/blm3 ||: + cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: cp ${script_dir}/remove_power.sh ${install_dir}/bin cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump @@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" 
- fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh index 1668838be0522bc02ab027b6ee4ac6ff250fefa2..457cb0de6f02f7000dc7437cde61bfec28c7205c 100755 --- a/packaging/tools/makepkg_pro.sh +++ b/packaging/tools/makepkg_pro.sh @@ -62,7 +62,7 @@ else fi cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs -cp ${build_dir}/bin/blm3 ${install_dir}/bin/blm3 ||: +cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: cp ${script_dir}/remove_pro.sh ${install_dir}/bin chmod a+x ${install_dir}/bin/* || : @@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo #if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then # cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: -# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then -# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin -# else -# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" -# fi # if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then # cp -r ${connector_dir}/go ${install_dir}/connector # else diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh index 416a3f60a4a57d6afa34d1d8f931a7efd68d6958..07032379d7e4bab2636f3685b6edb620780a124a 100755 --- a/packaging/tools/makepkg_tq.sh +++ b/packaging/tools/makepkg_tq.sh @@ -82,7 +82,7 @@ else cp ${build_dir}/bin/taos ${install_dir}/bin/tq cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd cp ${script_dir}/remove_tq.sh ${install_dir}/bin - cp ${build_dir}/bin/blm3 ${install_dir}/bin/blm3 ||: + cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||: cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump cp ${build_dir}/bin/tarbitrator ${install_dir}/bin @@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then cp ${build_dir}/lib/*.jar ${install_dir}/connector ||: - if [ -d "${connector_dir}/grafanaplugin/dist" ]; then - cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin - else - echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!" - fi if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then cp -r ${connector_dir}/go ${install_dir}/connector else diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 9956455691a9d042d20082eb70cd23d99c1cca77..c3db7e417adb11b92d55464b69c715e3aee2d6bb 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -2,7 +2,7 @@ # # This file is used to install tdengine rpm package on centos systems. 
The operating system # is required to use systemd to manage services at boot -#set -x +# set -x iplist="" serverFqdn="" @@ -64,9 +64,9 @@ else service_mod=2 fi -function kill_blm3() { -# ${csudo} pkill -f blm3 || : - pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}') +function kill_taosadapter() { +# ${csudo} pkill -f taosadapter || : + pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -86,6 +86,24 @@ function install_include() { ${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h } +function install_avro_lib() { + ${csudo} rm -f ${lib_link_dir}/libavro* || : + ${csudo} rm -f ${lib64_link_dir}/libavro* || : + + if [[ -f ${lib_dir}/libavro.so.23.0.0 ]]; then + ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23.0.0 + ${csudo} ln -s ${lib_link_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23 + ${csudo} ln -s ${lib_link_dir}/libavro.so.23 ${lib_link_dir}/libavro.so + + if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libavro.so ]]; then + ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23.0.0 || : + ${csudo} ln -s ${lib64_link_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23 || : + ${csudo} ln -s ${lib64_link_dir}/libavro.so.23 ${lib64_link_dir}/libavro.so || : + fi + fi + + ${csudo} ldconfig +} function install_lib() { ${csudo} rm -f ${lib_link_dir}/libtaos* || : ${csudo} rm -f ${lib64_link_dir}/libtaos* || : @@ -97,13 +115,15 @@ function install_lib() { ${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : fi + + ${csudo} ldconfig } function install_bin() { # Remove links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/blm3 || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -114,7 +134,7 @@ function install_bin() { #Make link [ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : - [ -x ${bin_dir}/blm3 ] && ${csudo} ln -s ${bin_dir}/blm3 ${bin_link_dir}/blm3 || : + [ -x ${bin_dir}/taosadapter ] && ${csudo} ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : [ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : @@ -127,7 +147,7 @@ function add_newHostname_to_hosts() { iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') arr=($iphost) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$localIp" ]]; then return @@ -182,7 +202,7 @@ function is_correct_ipaddr() { IFS=" " arr=($iplist) IFS="$OLD_IFS" - for s in ${arr[@]} + for s in "${arr[@]}" do if [[ "$s" == "$newIp" ]]; then return 0 @@ -271,20 +291,20 @@ function local_fqdn_check() { fi } -function install_blm3_config() { - if [ ! -f "${cfg_install_dir}/blm.toml" ]; then +function install_taosadapter_config() { + if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then [ ! 
-d %{cfg_install_dir} ] && ${csudo} ${csudo} mkdir -p ${cfg_install_dir} - [ -f ${cfg_dir}/blm.toml ] && ${csudo} cp ${cfg_dir}/blm.toml ${cfg_install_dir} - [ -f ${cfg_install_dir}/blm.toml ] && - ${csudo} chmod 644 ${cfg_install_dir}/blm.toml + [ -f ${cfg_dir}/taosadapter.toml ] && ${csudo} cp ${cfg_dir}/taosadapter.toml ${cfg_install_dir} + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo} chmod 644 ${cfg_install_dir}/taosadapter.toml fi - [ -f ${cfg_dir}/blm.toml ] && - ${csudo} mv ${cfg_dir}/blm.toml ${cfg_dir}/blm.toml.org + [ -f ${cfg_dir}/taosadapter.toml ] && + ${csudo} mv ${cfg_dir}/taosadapter.toml ${cfg_dir}/taosadapter.toml.new - [ -f ${cfg_install_dir}/blm.toml ] && - ${csudo} ln -s ${cfg_install_dir}/blm.toml ${cfg_dir} + [ -f ${cfg_install_dir}/taosadapter.toml ] && + ${csudo} ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir} } function install_config() { @@ -302,7 +322,7 @@ function install_config() { # restore the backup standard input, and turn off 6 exec 0<&6 6<&- - ${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.org + ${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.new ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir} #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" @@ -424,8 +444,8 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}" - ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}" + ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" @@ -446,6 +466,10 @@ function install_service_on_systemd() { ${csudo} systemctl enable taosd } +function install_taosadapter_service() { + [ -f ${cfg_dir}/taosadapter.service ] && ${csudo} cp ${cfg_dir}/taosadapter.service ${service_config_dir} +} + function install_service() { if ((${service_mod}==0)); then install_service_on_systemd @@ -453,7 +477,7 @@ function install_service() { install_service_on_sysvinit else # manual start taosd - kill_blm3 + kill_taosadapter kill_taosd fi } @@ -474,10 +498,12 @@ function install_TDengine() { # Install include, lib, binary and service install_include install_lib + install_avro_lib install_bin - install_service install_config - install_blm3_config + install_taosadapter_config + install_taosadapter_service + install_service # Ask if to start the service #echo diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh index 16a892d26c1d11cddf5dc15758e784c9ff268822..d2d36364208f23492d2ba6aefa783c85ad6d5572 100755 --- a/packaging/tools/preun.sh +++ b/packaging/tools/preun.sh @@ -43,8 +43,8 @@ else service_mod=2 fi -function kill_blm3() { - pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}') +function kill_taosadapter() { + pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -58,6 +58,12 @@ function kill_taosd() { } function clean_service_on_systemd() { + taosadapter_service_config="${service_config_dir}/taosadapter.service" + if systemctl is-active --quiet 
taosadapter; then + echo "taosadapter is running, stopping it..." + ${csudo} systemctl stop taosadapter &> /dev/null || echo &> /dev/null + fi + taosd_service_config="${service_config_dir}/${taos_service_name}.service" if systemctl is-active --quiet ${taos_service_name}; then @@ -67,6 +73,9 @@ function clean_service_on_systemd() { ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} + + [ -f ${taosadapter_service_config} ] && ${csudo} rm -f ${taosadapter_service_config} + } function clean_service_on_sysvinit() { @@ -100,7 +109,7 @@ function clean_service() { clean_service_on_sysvinit else # must manual stop taosd - kill_blm3 + kill_taosadapter kill_taosd fi } @@ -111,11 +120,11 @@ clean_service # Remove all links ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : -${csudo} rm -f ${bin_link_dir}/blm3 || : +${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/set_core || : -${csudo} rm -f ${cfg_link_dir}/* || : +${csudo} rm -f ${cfg_link_dir}/*.new || : ${csudo} rm -f ${inc_link_dir}/taos.h || : ${csudo} rm -f ${inc_link_dir}/taoserror.h || : ${csudo} rm -f ${lib_link_dir}/libtaos.* || : @@ -125,7 +134,7 @@ ${csudo} rm -f ${log_link_dir} || : ${csudo} rm -f ${data_link_dir} || : if ((${service_mod}==2)); then - kill_blm3 + kill_taosadapter kill_taosd fi diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index f4c3350b7861ce8c027b54641e56fa99f87afbb8..07a8362b2c45676986513020da668ff9235f00fa 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -54,8 +54,8 @@ else service_mod=2 fi -function kill_blm3() { - pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}') +function kill_taosadapter() { + pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then ${csudo} kill -9 $pid || : fi @@ -78,7 +78,7 @@ function clean_bin() { # Remove link ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : - ${csudo} rm -f ${bin_link_dir}/blm3 || : + ${csudo} rm -f ${bin_link_dir}/taosadapter || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : @@ -111,12 +111,14 @@ function clean_log() { function clean_service_on_systemd() { taosd_service_config="${service_config_dir}/${taos_service_name}.service" + taosadapter_service_config="${service_config_dir}/taosadapter.service" if systemctl is-active --quiet ${taos_service_name}; then echo "TDengine taosd is running, stopping it..." 
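(Both preun.sh above and remove.sh in this hunk probe units with `systemctl is-active --quiet` before stopping them. The guard in isolation, with a generic unit name and sudo elided, as a sketch of the uninstall sequence:)
```
# is-active --quiet prints nothing and reports state via its exit code, so
# the stop is attempted only on hosts where the unit is actually running;
# the trailing `|| :` keeps the uninstaller going even if a step fails.
unit="taosadapter"
if systemctl is-active --quiet "${unit}"; then
  systemctl stop "${unit}" || :
fi
systemctl disable "${unit}" 2>/dev/null || :
rm -f "/etc/systemd/system/${unit}.service"
systemctl daemon-reload
```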
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null fi ${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null ${csudo} rm -f ${taosd_service_config} + [ -f ${taosadapter_service_config} ] && ${sudo} rm -f ${taosadapter_service_config} tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service" if systemctl is-active --quiet ${tarbitrator_service_name}; then @@ -191,7 +193,7 @@ function clean_service() { clean_service_on_sysvinit else # must manual stop taosd - kill_blm3 + kill_taosadapter kill_taosd kill_tarbitrator fi diff --git a/packaging/tools/startPre.sh b/packaging/tools/startPre.sh index 2f466f94f08555b5c8cf8d5b4abe459f52ece49f..8a0ab11a4d37ffb9ad244faa2946cbbf10ce2026 100755 --- a/packaging/tools/startPre.sh +++ b/packaging/tools/startPre.sh @@ -9,8 +9,8 @@ line=`grep StartLimitBurst ${taosd}` num=${line##*=} #echo "burst num: ${num}" -startSeqFile=/usr/local/taos/.startSeq -recordFile=/usr/local/taos/.startRecord +startSeqFile=/var/log/taos/.startSeq +recordFile=/var/log/taos/.startRecord startSeq=0 @@ -48,4 +48,3 @@ if [ ${coreFlag} = "unlimited" ];then fi fi -/usr/bin/blm3 & diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 47af7568642d0badccda51a28c09d321cf782571..64e3af498cedd25dea90055426110522bc4a4086 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core20 -version: '2.3.0.0' +version: '2.3.1.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | diff --git a/src/client/inc/tscParseLine.h b/src/client/inc/tscParseLine.h index 74ba9ab3d9c5251e1cf8ab4e8549c8da0353ea49..fef55011b0faec1d15876764b3fd9808ec2b4e39 100644 --- a/src/client/inc/tscParseLine.h +++ b/src/client/inc/tscParseLine.h @@ -66,8 +66,7 @@ typedef struct { int32_t affectedRows; } SSmlLinesInfo; - -void addEscapeCharToString(char *str, int32_t len); +char* addEscapeCharToString(char *str, int32_t len); int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info); bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info); bool isValidInteger(char *str); diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index da51961d0ce8cd1a73cbef3272bc4d4471858cdc..c3c65018a50aea8e7f36d89c15c6b7faa12f2047 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -358,9 +358,13 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) { int num_fields = taos_num_fields(pSql); TAOS_FIELD *fields = taos_fetch_fields(pSql); - char buf[TSDB_COL_NAME_LEN + 16]; for (int i = 0; i < num_fields; i++) { - memset(buf, 0, sizeof(buf)); + char *buf = calloc(1, lengths[i] + 1); + if (buf == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + memset(buf, 0, lengths[i] + 1); int32_t ret = tscGetNthFieldResult(row, fields, lengths, i, buf); if (i == 0) { @@ -373,10 +377,13 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) { } else { snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s,", buf); } + + free(buf); + if (i == num_fields - 1) { sprintf(result + strlen(result) - 1, "%s", ")"); } - } + } if (0 == strlen(result)) { return TSDB_CODE_TSC_INVALID_TABLE_NAME; diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c index a6953add19f4d0949caa6513c8ee6e3cf2a871e3..af57f7ec8c6c192bf84915abd86728ab8f195835 100644 --- a/src/client/src/tscParseLineProtocol.c +++ 
b/src/client/src/tscParseLineProtocol.c @@ -631,11 +631,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo* if (code != 0) { tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName); return code; - } else { - pointSchema->precision = dbSchema.precision; - destroySmlSTableSchema(&dbSchema); } - } else if (code == TSDB_CODE_SUCCESS) { + } + + if (code == TSDB_CODE_SUCCESS) { + pointSchema->precision = dbSchema.precision; size_t pointTagSize = taosArrayGetSize(pointSchema->tags); size_t pointFieldSize = taosArrayGetSize(pointSchema->fields); @@ -1177,13 +1177,14 @@ static void escapeSpecialCharacter(uint8_t field, const char **pos) { *pos = cur; } -void addEscapeCharToString(char *str, int32_t len) { +char* addEscapeCharToString(char *str, int32_t len) { if (str == NULL) { - return; + return NULL; } memmove(str + 1, str, len); str[0] = str[len + 1] = TS_ESCAPE_CHAR; str[len + 2] = '\0'; + return str; } bool isValidInteger(char *str) { @@ -1511,9 +1512,9 @@ static bool convertStrToNumber(TAOS_SML_KV *pVal, char *str, SSmlLinesInfo* info errno = 0; uint8_t type = pVal->type; int16_t length = pVal->length; - int64_t val_s; - uint64_t val_u; - double val_d; + int64_t val_s = 0; + uint64_t val_u = 0; + double val_d = 0.0; strntolower_s(str, str, (int32_t)strlen(str)); if (IS_FLOAT_TYPE(type)) { @@ -1813,7 +1814,7 @@ static int32_t getTimeStampValue(char *value, uint16_t len, int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value, uint16_t len, SSmlLinesInfo* info) { int32_t ret; - SMLTimeStampType type; + SMLTimeStampType type = SML_TIME_STAMP_NOW; int64_t tsVal; ret = isTimeStamp(value, len, &type, info); @@ -1907,8 +1908,6 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash } //Escape special character if (*cur == '\\') { - //TODO: escape will work after column & tag - //support spcial characters escapeSpecialCharacter(2, &cur); } key[len] = *cur; @@ -1985,6 +1984,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index, //Escape special character if (*cur == '\\') { escapeSpecialCharacter(isTag ? 
2 : 3, &cur); + len++; } cur++; len++; @@ -2107,6 +2107,13 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, pkv = *pKVs; } + size_t childTableNameLen = strlen(tsSmlChildTableName); + char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + if (childTableNameLen != 0) { + memcpy(childTableName, tsSmlChildTableName, childTableNameLen); + addEscapeCharToString(childTableName, (int32_t)(childTableNameLen)); + } + while (*cur != '\0') { ret = parseSmlKey(pkv, &cur, pHash, info); if (ret) { @@ -2118,7 +2125,8 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs, tscError("SML:0x%"PRIx64" Unable to parse value", info->id); goto error; } - if (!isField && (strcasecmp(pkv->key, "`ID`") == 0)) { + + if (!isField && childTableNameLen != 0 && strcasecmp(pkv->key, childTableName) == 0) { smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1); memcpy(smlData->childTableName, pkv->value, pkv->length); strntolower_s(smlData->childTableName, smlData->childTableName, (int32_t)pkv->length); @@ -2405,7 +2413,7 @@ static SSqlObj* createSmlQueryObj(TAOS* taos, int32_t affected_rows, int32_t cod TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision) { int code = TSDB_CODE_SUCCESS; int affected_rows = 0; - SMLTimeStampType tsType; + SMLTimeStampType tsType = SML_TIME_STAMP_NOW; if (protocol == TSDB_SML_LINE_PROTOCOL) { code = convertPrecisionType(precision, &tsType); diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c index f6b723ef3cd554a4062035c6352ee485022340ac..e78abf0596447df0ee58db88ca87b19011293c6c 100644 --- a/src/client/src/tscParseOpenTSDB.c +++ b/src/client/src/tscParseOpenTSDB.c @@ -305,6 +305,12 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, *pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV)); pkv = *pKVs; + size_t childTableNameLen = strlen(tsSmlChildTableName); + char childTbName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0}; + if (childTableNameLen != 0) { + memcpy(childTbName, tsSmlChildTableName, childTableNameLen); + addEscapeCharToString(childTbName, (int32_t)(childTableNameLen)); + } while (*cur != '\0') { ret = parseTelnetTagKey(pkv, &cur, pHash, info); if (ret) { @@ -316,7 +322,7 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs, tscError("OTD:0x%"PRIx64" Unable to parse value", info->id); return ret; } - if ((strcasecmp(pkv->key, "`ID`") == 0)) { + if (childTableNameLen != 0 && strcasecmp(pkv->key, childTbName) == 0) { *childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1); memcpy(*childTableName, pkv->value, pkv->length); (*childTableName)[pkv->length] = '\0'; @@ -892,26 +898,33 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs, if (tags == NULL || tags->type != cJSON_Object) { return TSDB_CODE_TSC_INVALID_JSON; } - //only pick up the first ID value as child table name - cJSON *id = cJSON_GetObjectItem(tags, "ID"); - if (id != NULL) { - if (!cJSON_IsString(id)) { - tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id); - return TSDB_CODE_TSC_INVALID_JSON; - } - size_t idLen = strlen(id->valuestring); - *childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char)); - memcpy(*childTableName, id->valuestring, idLen); - strntolower_s(*childTableName, *childTableName, (int32_t)idLen); - addEscapeCharToString(*childTableName, (int32_t)idLen); - - //check duplicate IDs - cJSON_DeleteItemFromObject(tags, "ID"); - id = cJSON_GetObjectItem(tags, "ID"); + + 
//handle child table name + size_t childTableNameLen = strlen(tsSmlChildTableName); + char childTbName[TSDB_TABLE_NAME_LEN] = {0}; + if (childTableNameLen != 0) { + memcpy(childTbName, tsSmlChildTableName, childTableNameLen); + cJSON *id = cJSON_GetObjectItem(tags, childTbName); if (id != NULL) { - return TSDB_CODE_TSC_DUP_TAG_NAMES; + if (!cJSON_IsString(id)) { + tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id); + return TSDB_CODE_TSC_INVALID_JSON; + } + size_t idLen = strlen(id->valuestring); + *childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char)); + memcpy(*childTableName, id->valuestring, idLen); + strntolower_s(*childTableName, *childTableName, (int32_t)idLen); + addEscapeCharToString(*childTableName, (int32_t)idLen); + + //check duplicate IDs + cJSON_DeleteItemFromObject(tags, childTbName); + id = cJSON_GetObjectItem(tags, childTbName); + if (id != NULL) { + return TSDB_CODE_TSC_DUP_TAG_NAMES; + } } } + int32_t tagNum = cJSON_GetArraySize(tags); //at least one tag pair required if (tagNum <= 0) { diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4eb2cc9a3da2f6e400dd5f85e364d934afad1a47..c52ff8782e20ac504383474793d9be85c98f114a 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2454,6 +2454,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col const char* msg12 = "parameter is out of range [1, 100]"; const char* msg13 = "parameter list required"; const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'"; + const char* msg15 = "parameter is out of range [1, 1000]"; switch (functionId) { case TSDB_FUNC_COUNT: { @@ -2901,11 +2902,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col } } } else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) { + if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); - int64_t numRowsSelected = GET_INT32_VAL(val); + int64_t numRowsSelected = GET_INT64_VAL(val); if (numRowsSelected <= 0 || numRowsSelected > 1000) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12); + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15); } // todo REFACTOR @@ -5748,6 +5753,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq const char* msg9 = "orderby column must projected in subquery"; const char* msg10 = "not support distinct mixed with order by"; const char* msg11 = "not support order with udf"; + const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg"; setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5846,6 +5852,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq size_t s = taosArrayGetSize(pSortOrder); if (s == 1) { if (orderByTags) { + if (tscIsDiffDerivLikeQuery(pQueryInfo)) { + return invalidOperationMsg(pMsgBuf, msg12); + } pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b19af46a0c7f191b84d1ea8658f13456624179c9..0eba04ffb2e500e0d7a0ab6f005a217b6027f41c 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -2955,7 +2955,8 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, 
STableMetaInfo *pTableMetaInfo, bool // in case of child table, here only get the if (pMeta->tableType == TSDB_CHILD_TABLE) { int32_t code = tscCreateTableMetaFromSTableMeta(pSql, &pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta)); - pSql->pBuf = (void *)(pSTMeta); + pSql->pBuf = (void *)(pSTMeta); + pMeta = pTableMetaInfo->pTableMeta; if (code != TSDB_CODE_SUCCESS) { return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate); } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index bb3bddeefd798366fe205eb67b55b3b4a7301df4..89da3c5640c6523d4d2a816b8ae0293310c5830a 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -781,6 +781,16 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) { return isNull(((char*) pSql->res.urow[col]) + row * pInfo->field.bytes, pInfo->field.type); } +bool taos_is_update_query(TAOS_RES *res) { + SSqlObj *pSql = (SSqlObj *)res; + if (pSql == NULL || pSql->signature != pSql) { + return false; + } + + SSqlCmd* pCmd = &pSql->cmd; + return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_RESET_CACHE == pCmd->command || TSDB_SQL_USE_DB == pCmd->command); +} + int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int len = 0; @@ -909,7 +919,6 @@ int taos_validate_sql(TAOS *taos, const char *sql) { strtolower(pSql->sqlstr, sql); -// pCmd->curSql = NULL; if (NULL != pCmd->insertParam.pTableBlockHashList) { taosHashCleanup(pCmd->insertParam.pTableBlockHashList); pCmd->insertParam.pTableBlockHashList = NULL; @@ -934,6 +943,17 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return code; } +void taos_reset_current_db(TAOS *taos) { + STscObj* pObj = (STscObj*) taos; + if (pObj == NULL || pObj->signature != pObj) { + return; + } + + pthread_mutex_lock(&pObj->mutex); + memset(pObj->db, 0, tListLen(pObj->db)); + pthread_mutex_unlock(&pObj->mutex); +} + void loadMultiTableMetaCallback(void *param, TAOS_RES *res, int code) { SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param); if (pSql == NULL) { diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 799bacda2ba9a3b52a99859edb5968d8602b4c33..bd201d980017522d0e32f6124290305d5b136f8d 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -46,7 +46,7 @@ extern int64_t tsDnodeStartTime; // common extern int tsRpcTimer; extern int tsRpcMaxTime; -extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled +extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled extern int32_t tsMaxConnections; extern int32_t tsMaxShellConns; extern int32_t tsShellActivityTimer; @@ -57,19 +57,20 @@ extern float tsRatioOfQueryCores; extern int8_t tsDaylight; extern char tsTimezone[]; extern char tsLocale[]; -extern char tsCharset[]; // default encode string +extern char tsCharset[]; // default encode string extern int8_t tsEnableCoreFile; extern int32_t tsCompressMsgSize; extern int32_t tsCompressColData; extern int32_t tsMaxNumOfDistinctResults; extern char tsTempDir[]; -//query buffer management -extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing -extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing -extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked +// query buffer management +extern int32_t tsQueryBufferSize; // maximum allowed usage 
buffer size in MB for each data node during query processing +extern int64_t + tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing +extern int32_t tsRetrieveBlockingModel; // retrieve threads will be blocked -extern int8_t tsKeepOriginalColumnName; +extern int8_t tsKeepOriginalColumnName; // client extern int32_t tsMaxSQLStringLen; @@ -108,7 +109,7 @@ extern int32_t tsQuorum; extern int8_t tsUpdate; extern int8_t tsCacheLastRow; -//tsdb +// tsdb extern bool tsdbForceKeepFile; extern bool tsdbForceCompactFile; extern int32_t tsdbWalFlushSize; @@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress; extern int8_t tsHttpEnableRecordSql; extern int8_t tsTelegrafUseFieldNum; extern int8_t tsHttpDbNameMandatory; +extern int32_t tsHttpKeepAlive; // mqtt extern int8_t tsEnableMqttModule; @@ -170,22 +172,22 @@ extern int64_t tsTickPerDay[3]; extern int32_t tsTopicBianryLen; // system info -extern char tsOsName[]; -extern int64_t tsPageSize; -extern int64_t tsOpenMax; -extern int64_t tsStreamMax; -extern int32_t tsNumOfCores; -extern float tsTotalLogDirGB; -extern float tsTotalTmpDirGB; -extern float tsTotalDataDirGB; -extern float tsAvailLogDirGB; -extern float tsAvailTmpDirectorySpace; -extern float tsAvailDataDirGB; -extern float tsUsedDataDirGB; -extern float tsMinimalLogDirGB; -extern float tsReservedTmpDirectorySpace; -extern float tsMinimalDataDirGB; -extern int32_t tsTotalMemoryMB; +extern char tsOsName[]; +extern int64_t tsPageSize; +extern int64_t tsOpenMax; +extern int64_t tsStreamMax; +extern int32_t tsNumOfCores; +extern float tsTotalLogDirGB; +extern float tsTotalTmpDirGB; +extern float tsTotalDataDirGB; +extern float tsAvailLogDirGB; +extern float tsAvailTmpDirectorySpace; +extern float tsAvailDataDirGB; +extern float tsUsedDataDirGB; +extern float tsMinimalLogDirGB; +extern float tsReservedTmpDirectorySpace; +extern float tsMinimalDataDirGB; +extern int32_t tsTotalMemoryMB; extern uint32_t tsVersion; // build info @@ -196,43 +198,44 @@ extern char gitinfoOfInternal[]; extern char buildinfo[]; // log -extern int8_t tsAsyncLog; -extern int32_t tsNumOfLogLines; -extern int32_t tsLogKeepDays; -extern int32_t dDebugFlag; -extern int32_t vDebugFlag; -extern int32_t mDebugFlag; +extern int8_t tsAsyncLog; +extern int32_t tsNumOfLogLines; +extern int32_t tsLogKeepDays; +extern int32_t dDebugFlag; +extern int32_t vDebugFlag; +extern int32_t mDebugFlag; extern uint32_t cDebugFlag; -extern int32_t jniDebugFlag; -extern int32_t tmrDebugFlag; -extern int32_t sdbDebugFlag; -extern int32_t httpDebugFlag; -extern int32_t mqttDebugFlag; -extern int32_t monDebugFlag; -extern int32_t uDebugFlag; -extern int32_t rpcDebugFlag; -extern int32_t odbcDebugFlag; +extern int32_t jniDebugFlag; +extern int32_t tmrDebugFlag; +extern int32_t sdbDebugFlag; +extern int32_t httpDebugFlag; +extern int32_t mqttDebugFlag; +extern int32_t monDebugFlag; +extern int32_t uDebugFlag; +extern int32_t rpcDebugFlag; +extern int32_t odbcDebugFlag; extern uint32_t qDebugFlag; -extern int32_t wDebugFlag; -extern int32_t cqDebugFlag; -extern int32_t debugFlag; +extern int32_t wDebugFlag; +extern int32_t cqDebugFlag; +extern int32_t debugFlag; extern int8_t tsClientMerge; #ifdef TD_TSZ // lossy -extern char lossyColumns[]; -extern double fPrecision; -extern double dPrecision; +extern char lossyColumns[]; +extern double fPrecision; +extern double dPrecision; extern uint32_t maxRange; extern uint32_t curRange; -extern char Compressor[]; +extern char Compressor[]; #endif // long 
query extern int8_t tsDeadLockKillQuery; // schemaless extern char tsDefaultJSONStrType[]; +extern char tsSmlChildTableName[]; typedef struct { diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index ebfd5e18756298c18d1d2060bed30b2aee00d1b0..c1a254b4ebd5fdfe1d29e02ab7cacbe3195058f1 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -14,18 +14,18 @@ */ #define _DEFAULT_SOURCE +#include "tglobal.h" +#include "monitor.h" #include "os.h" #include "taosdef.h" #include "taoserror.h" -#include "tulog.h" +#include "tcompare.h" #include "tconfig.h" -#include "tglobal.h" -#include "monitor.h" -#include "tsocket.h" -#include "tutil.h" #include "tlocale.h" +#include "tsocket.h" #include "ttimezone.h" -#include "tcompare.h" +#include "tulog.h" +#include "tutil.h" // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -49,16 +49,16 @@ int32_t tsDnodeId = 0; int64_t tsDnodeStartTime = 0; // common -int32_t tsRpcTimer = 300; -int32_t tsRpcMaxTime = 600; // seconds; -int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default -int32_t tsMaxShellConns = 50000; +int32_t tsRpcTimer = 300; +int32_t tsRpcMaxTime = 600; // seconds; +int32_t tsRpcForceTcp = 0; // disable this, means query, show command use udp protocol as default +int32_t tsMaxShellConns = 50000; int32_t tsMaxConnections = 5000; -int32_t tsShellActivityTimer = 3; // second +int32_t tsShellActivityTimer = 3; // second float tsNumOfThreadsPerCore = 1.0f; int32_t tsNumOfCommitThreads = 4; float tsRatioOfQueryCores = 1.0f; -int8_t tsDaylight = 0; +int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string @@ -87,7 +87,7 @@ int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN; int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN; -int8_t tsTscEnableRecordSql = 0; +int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from // one virtual node, to order according to timestamp @@ -97,7 +97,7 @@ int32_t tsMaxNumOfOrderedResults = 1000000; int32_t tsMinSlidingTime = 10; // the maxinum number of distict query result -int32_t tsMaxNumOfDistinctResults = 1000 * 10000; +int32_t tsMaxNumOfDistinctResults = 1000 * 10000; // 1 us for interval time range, changed accordingly int32_t tsMinIntervalTime = 1; @@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000; int32_t tsStreamCompStartDelay = 10000; // the stream computing delay time after executing failed, change accordingly -int32_t tsRetryStreamCompDelay = 10*1000; +int32_t tsRetryStreamCompDelay = 10 * 1000; // The delayed computing ration. 10% of the whole computing time window by default. 
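
The schemaless write path gains a naming hook here: tsSmlChildTableName (declared in tglobal.h just above, with its default value and config registration further down in tglobal.c) names a tag whose value supplies the child table name, instead of the MD5-hash name the server otherwise generates. A minimal client-side sketch using the new JDBC SchemalessStatement from later in this patch; the tag key "tname", the test database, and a taos.cfg containing "smlChildTableName tname" are all assumptions here:

```java
import com.taosdata.jdbc.SchemalessStatement;
import com.taosdata.jdbc.enums.SchemalessProtocolType;
import com.taosdata.jdbc.enums.SchemalessTimestampType;

import java.sql.Connection;
import java.sql.DriverManager;

public class SmlChildTableNameSketch {
    public static void main(String[] args) throws Exception {
        // hypothetical setup: taos.cfg contains "smlChildTableName tname"
        String url = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url)) {
            SchemalessStatement stmt = new SchemalessStatement(conn.createStatement());
            // the value of the "tname" tag (cpu001) should become the child table name;
            // with smlChildTableName left empty, an MD5-derived name is used instead
            String line = "meters,tname=cpu001,location=beijing current=10.3,voltage=219i 1626006833639000000";
            // NOT_CONFIGURED lets the database decide the timestamp precision
            stmt.executeSchemaless(line, SchemalessProtocolType.LINE, SchemalessTimestampType.NOT_CONFIGURED);
            stmt.close();
        }
    }
}
```
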
float tsStreamComputDelayRatio = 0.1f; @@ -128,41 +128,41 @@ int64_t tsQueryBufferSizeBytes = -1; int32_t tsRetrieveBlockingModel = 0; // last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name -int8_t tsKeepOriginalColumnName = 0; +int8_t tsKeepOriginalColumnName = 0; // db parameters int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS; -int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE; -int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP; +int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE; +int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP; int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK; int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK; -int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds +int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION; -int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL; -int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; -int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD; -int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION; -int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION; -int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION; -int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION; -int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; -int32_t tsMaxVgroupsPerDb = 0; +int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL; +int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL; +int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD; +int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION; +int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION; +int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION; +int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION; +int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW; +int32_t tsMaxVgroupsPerDb = 0; int32_t tsMinTablePerVnode = TSDB_TABLES_STEP; int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES; int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP; int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO; -// tsdb config +// tsdb config // For backward compatibility bool tsdbForceKeepFile = false; -bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly +bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB // balance int8_t tsEnableBalance = 1; int8_t tsAlternativeRole = 0; -int32_t tsBalanceInterval = 300; // seconds +int32_t tsBalanceInterval = 300; // seconds int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days int32_t tsMnodeEqualVnodeNum = 4; int8_t tsEnableFlowCtrl = 1; @@ -180,15 +180,16 @@ int8_t tsHttpEnableCompress = 1; int8_t tsHttpEnableRecordSql = 0; int8_t tsTelegrafUseFieldNum = 0; int8_t tsHttpDbNameMandatory = 0; +int32_t tsHttpKeepAlive = 30000; // mqtt int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default -char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org"; -char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883"; -char tsMqttUser[TSDB_MQTT_USER_LEN] = {0}; -char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0}; -char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber"; -char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // # +char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org"; +char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883"; +char tsMqttUser[TSDB_MQTT_USER_LEN] = {0}; +char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0}; +char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber"; +char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // # // 
monitor int8_t tsEnableMonitorModule = 1; @@ -197,7 +198,7 @@ char tsInternalPass[] = "secretkey"; int32_t tsMonitorInterval = 30; // seconds // stream -int8_t tsEnableStream = 1; +int8_t tsEnableStream = 1; // internal int8_t tsCompactMnodeWal = 0; @@ -213,7 +214,7 @@ char tsDataDir[PATH_MAX] = {0}; char tsScriptDir[PATH_MAX] = {0}; char tsTempDir[PATH_MAX] = "/tmp/"; -int32_t tsDiskCfgNum = 0; +int32_t tsDiskCfgNum = 0; int32_t tsTopicBianryLen = 16000; #ifndef _STORAGE @@ -231,42 +232,42 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS]; int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L}; // system info -char tsOsName[10] = "Linux"; -int64_t tsPageSize; -int64_t tsOpenMax; -int64_t tsStreamMax; -int32_t tsNumOfCores = 1; -float tsTotalTmpDirGB = 0; -float tsTotalDataDirGB = 0; -float tsAvailTmpDirectorySpace = 0; -float tsAvailDataDirGB = 0; -float tsUsedDataDirGB = 0; -float tsReservedTmpDirectorySpace = 1.0f; -float tsMinimalDataDirGB = 2.0f; -int32_t tsTotalMemoryMB = 0; +char tsOsName[10] = "Linux"; +int64_t tsPageSize; +int64_t tsOpenMax; +int64_t tsStreamMax; +int32_t tsNumOfCores = 1; +float tsTotalTmpDirGB = 0; +float tsTotalDataDirGB = 0; +float tsAvailTmpDirectorySpace = 0; +float tsAvailDataDirGB = 0; +float tsUsedDataDirGB = 0; +float tsReservedTmpDirectorySpace = 1.0f; +float tsMinimalDataDirGB = 2.0f; +int32_t tsTotalMemoryMB = 0; uint32_t tsVersion = 0; // log -int32_t tsNumOfLogLines = 10000000; -int32_t mDebugFlag = 131; -int32_t sdbDebugFlag = 131; -int32_t dDebugFlag = 135; -int32_t vDebugFlag = 135; +int32_t tsNumOfLogLines = 10000000; +int32_t mDebugFlag = 131; +int32_t sdbDebugFlag = 131; +int32_t dDebugFlag = 135; +int32_t vDebugFlag = 135; uint32_t cDebugFlag = 131; -int32_t jniDebugFlag = 131; -int32_t odbcDebugFlag = 131; -int32_t httpDebugFlag = 131; -int32_t mqttDebugFlag = 131; -int32_t monDebugFlag = 131; +int32_t jniDebugFlag = 131; +int32_t odbcDebugFlag = 131; +int32_t httpDebugFlag = 131; +int32_t mqttDebugFlag = 131; +int32_t monDebugFlag = 131; uint32_t qDebugFlag = 131; -int32_t rpcDebugFlag = 131; -int32_t uDebugFlag = 131; -int32_t debugFlag = 0; -int32_t sDebugFlag = 135; -int32_t wDebugFlag = 135; -int32_t tsdbDebugFlag = 131; -int32_t cqDebugFlag = 131; -int32_t fsDebugFlag = 135; +int32_t rpcDebugFlag = 131; +int32_t uDebugFlag = 131; +int32_t debugFlag = 0; +int32_t sDebugFlag = 135; +int32_t wDebugFlag = 135; +int32_t tsdbDebugFlag = 131; +int32_t cqDebugFlag = 131; +int32_t fsDebugFlag = 135; int8_t tsClientMerge = 0; @@ -274,13 +275,14 @@ int8_t tsClientMerge = 0; // // lossy compress 6 // -char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress. -// below option can take effect when tsLossyColumns not empty -double fPrecision = 1E-8; // float column precision -double dPrecision = 1E-16; // double column precision -uint32_t maxRange = 500; // max range -uint32_t curRange = 100; // range -char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR +char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty + // can close lossy compress. 
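
Keep-alive is now tunable on both ends of the HTTP path: the server gains the httpKeepAlive option (default 30000 just above, registered further down with a 3000-3600000 range; per its comment it governs how long a pContext stays cached), while the JDBC REST connector, in the changes further down, accepts independent httpKeepAlive and httpPoolSize connection properties that feed the new HttpClientPoolUtil.init(). A minimal sketch of the client side, assuming a REST service on port 6041:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

public class RestKeepAliveSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // per-route pool size (default "20"); RestfulDriver hands this to
        // HttpClientPoolUtil.init(), which sizes the total pool at 10x this value
        props.setProperty("httpPoolSize", "20");
        // "false" makes every POST send Connection: close instead of keep-alive
        props.setProperty("httpKeepAlive", "true");

        String url = "jdbc:TAOS-RS://127.0.0.1:6041/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url, props);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("select server_version()")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
```

The server option and the client property share a name but are independent knobs; the new HttpKeepAliveTest below drives the same two client properties.
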
+// below option can take effect when tsLossyColumns not empty +double fPrecision = 1E-8; // float column precision +double dPrecision = 1E-16; // double column precision +uint32_t maxRange = 500; // max range +uint32_t curRange = 100; // range +char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR #endif // long query death-lock @@ -288,6 +290,7 @@ int8_t tsDeadLockKillQuery = 0; // default JSON string type char tsDefaultJSONStrType[7] = "binary"; +char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user-defined child table name can be specified in the tag value; if left empty, the system generates the table name using an MD5 hash. int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -298,7 +301,7 @@ char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"}; static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT; void taosSetAllDebugFlag() { - if (debugFlag != 0) { + if (debugFlag != 0) { mDebugFlag = debugFlag; sdbDebugFlag = debugFlag; dDebugFlag = debugFlag; @@ -309,7 +312,7 @@ void taosSetAllDebugFlag() { httpDebugFlag = debugFlag; mqttDebugFlag = debugFlag; monDebugFlag = debugFlag; - qDebugFlag = debugFlag; + qDebugFlag = debugFlag; rpcDebugFlag = debugFlag; uDebugFlag = debugFlag; sDebugFlag = debugFlag; @@ -321,12 +324,13 @@ } bool taosCfgDynamicOptions(char *msg) { - char *option, *value; - int32_t olen, vlen; - int32_t vint = 0; + char *option, *value; + int32_t olen, vlen; + int32_t vint = 0; paGetToken(msg, &option, &olen); - if (olen == 0) return false;; + if (olen == 0) return false; + ; paGetToken(option + olen + 1, &value, &vlen); if (vlen == 0) @@ -339,9 +343,9 @@ bool taosCfgDynamicOptions(char *msg) { for (int32_t i = 0; i < tsGlobalConfigNum; ++i) { SGlobalCfg *cfg = tsGlobalConfig + i; - //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; + // if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue; if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue; - + int32_t cfgLen = (int32_t)strlen(cfg->option); if (cfgLen != olen) continue; if (strncasecmp(option, cfg->option, olen) != 0) continue; @@ -370,7 +374,7 @@ bool taosCfgDynamicOptions(char *msg) { return true; } if (strncasecmp(cfg->option, "debugFlag", olen) == 0) { - taosSetAllDebugFlag(); + taosSetAllDebugFlag(); } return true; } @@ -427,7 +431,7 @@ static void taosCheckDataDirCfg() { } static int32_t taosCheckTmpDir(void) { - if (strlen(tsTempDir) <= 0){ + if (strlen(tsTempDir) <= 0) { uError("tempDir is not set"); return -1; } @@ -448,7 +452,7 @@ static void doInitGlobalConfig(void) { srand(taosSafeRand()); SGlobalCfg cfg = {0}; - + // ip address cfg.option = "firstEp"; cfg.ptr = tsFirst; @@ -577,12 +581,12 @@ static void doInitGlobalConfig(void) { cfg.ptr = &tsMaxNumOfDistinctResults; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; - cfg.minValue = 10*10000; - cfg.maxValue = 10000*10000; + cfg.minValue = 10 * 10000; + cfg.maxValue = 10000 * 10000; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - + cfg.option = "numOfMnodes"; cfg.ptr = &tsNumOfMnodes; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1189,7 +1193,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - // module configs + // module configs cfg.option = "flowctrl"; cfg.ptr = &tsEnableFlowCtrl; cfg.valType = TAOS_CFG_VTYPE_INT8; @@ -1320,6 +1324,17 @@ static void doInitGlobalConfig(void)
{ cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // pContext in cache + cfg.option = "httpKeepAlive"; + cfg.ptr = &tsHttpKeepAlive; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; + cfg.minValue = 3000; + cfg.maxValue = 3600000; + cfg.ptrLength = 0; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // debug flag cfg.option = "numOfLogLines"; cfg.ptr = &tsNumOfLogLines; @@ -1401,7 +1416,6 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "sdbDebugFlag"; cfg.ptr = &sdbDebugFlag; cfg.valType = TAOS_CFG_VTYPE_INT32; @@ -1633,7 +1647,7 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - // enable kill long query + // enable kill long query cfg.option = "deadLockKillQuery"; cfg.ptr = &tsDeadLockKillQuery; cfg.valType = TAOS_CFG_VTYPE_INT8; @@ -1665,6 +1679,17 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + // child table name specified in schemaless tag value + cfg.option = "smlChildTableName"; + cfg.ptr = tsSmlChildTableName; + cfg.valType = TAOS_CFG_VTYPE_STRING; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 0; + cfg.ptrLength = tListLen(tsSmlChildTableName); + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks cfg.option = "walFlushSize"; cfg.ptr = &tsdbWalFlushSize; @@ -1731,21 +1756,18 @@ static void doInitGlobalConfig(void) { #else assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM); #endif - } -void taosInitGlobalCfg() { - pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); -} +void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); } int32_t taosCheckGlobalCfg() { - char fqdn[TSDB_FQDN_LEN]; + char fqdn[TSDB_FQDN_LEN]; uint16_t port; if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) { taosSetAllDebugFlag(); } - + if (tsLocalFqdn[0] == 0) { taosGetFqdn(tsLocalFqdn); } @@ -1772,7 +1794,7 @@ int32_t taosCheckGlobalCfg() { if (taosCheckTmpDir()) { return -1; } - + taosGetSystemInfo(); tsSetLocale(); @@ -1794,8 +1816,8 @@ } if (tsMaxTablePerVnode < tsMinTablePerVnode) { - uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", - tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode); + uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode, + tsMinTablePerVnode, tsMinTablePerVnode); tsMaxTablePerVnode = tsMinTablePerVnode; } @@ -1817,7 +1839,7 @@ } tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035] - tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp + tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp tsSyncPort = tsServerPort + TSDB_PORT_SYNC; tsHttpPort = tsServerPort + TSDB_PORT_HTTP; @@ -1837,17 +1859,17 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) { strcpy(fqdn, ep); char *temp = strchr(fqdn, ':'); - if (temp) { + if (temp) { *temp = 0; - *port = atoi(temp+1); - } - + *port = atoi(temp + 1); + } + if (*port == 0) { *port = tsServerPort; return -1; } - return 0; + return 0; } /* diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index
9ae793ad2d567eb11d10627b65698f612542e988..792ef7c3036f15068796e09883d3f4d47a038fe2 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit 9ae793ad2d567eb11d10627b65698f612542e988 +Subproject commit 792ef7c3036f15068796e09883d3f4d47a038fe2 diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 065dedac63372f5c71146ee9937a6e136d71ce81..c5b59baefedc38fa4bf558526a8c4a1777bfb7bb 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.35-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.36-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 7caf46848d18c4491cdea1ab50df31d8d2d26daf..926a5ef483d9f1da07dbfdeb796567d3ea077c87 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.35 + 2.0.36 jar JDBCDriver diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index a586879afe61b9272712a14f36c60fbd85ba80ed..04115e2a0ebc5924a51862cd9a49a5352cf6a5b6 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.35 + 2.0.36 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -58,6 +58,13 @@ 4.13.1 test + + + commons-logging + commons-logging + 1.2 + test + @@ -70,6 +77,18 @@ + + org.apache.maven.plugins + maven-source-plugin + + + attach-sources + + jar + + + + org.apache.maven.plugins maven-assembly-plugin diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java index f90fa43fa26288943b5fa6c500ace6b92feb8429..748891d943536b3cb6ebd6adffd295573adee4d1 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java @@ -8,16 +8,35 @@ import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; +/** + * @author huolibo@qq.com + * @version v1.0.0 + * @JDK: 1.8 + * @description: this class is an extension of {@link Statement}. 
use like: + * Statement statement = conn.createStatement(); + * SchemalessStatement schemalessStatement = new SchemalessStatement(statement); + * schemalessStatement.execute(sql); + * schemalessStatement.executeSchemaless(lines, SchemalessProtocolType, SchemalessTimestampType); + * @since 2021-11-03 17:10 + */ public class SchemalessStatement extends AbstractStatementWrapper { public SchemalessStatement(Statement statement) { super(statement); } - public void executeSchemaless(String[] strings, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { + /** + * batch insert schemaless lines + * + * @param lines schemaless data + * @param protocolType schemaless type {@link SchemalessProtocolType} + * @param timestampType Time precision {@link SchemalessTimestampType} + * @throws SQLException execute insert exception + */ + public void executeSchemaless(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { Connection connection = this.getConnection(); if (connection instanceof TSDBConnection) { TSDBConnection tsdbConnection = (TSDBConnection) connection; - tsdbConnection.getConnector().insertLines(strings, protocolType, timestampType); + tsdbConnection.getConnector().insertLines(lines, protocolType, timestampType); } else if (connection instanceof RestfulConnection) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "restful connection is not supported currently"); } else { @@ -25,7 +44,15 @@ public class SchemalessStatement extends AbstractStatementWrapper { } } - public void executeSchemaless(String sql, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { - executeSchemaless(new String[]{sql}, protocolType, timestampType); + /** + * only one insert + * + * @param line schemaless line + * @param protocolType schemaless type {@link SchemalessProtocolType} + * @param timestampType Time precision {@link SchemalessTimestampType} + * @throws SQLException execute insert exception + */ + public void executeSchemaless(String line, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException { + executeSchemaless(new String[]{line}, protocolType, timestampType); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java index 307451e014c59c1c3419f1a9daff4f89e8b90d46..0fef64a6f82706e30677ad4e74604924c5cc2e60 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java @@ -135,7 +135,6 @@ public class TSDBDriver extends AbstractDriver { TSDBJNIConnector.init(props); return new TSDBConnection(props, this.dbMetaData); } catch (SQLWarning sqlWarning) { - sqlWarning.printStackTrace(); return new TSDBConnection(props, this.dbMetaData); } catch (SQLException sqlEx) { throw sqlEx; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index 22fb0c4ae4987ade0a406fe5628bf80d975f3ae5..42ebedf4027b0e333b9e79b8045f1bae0d338ac7 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -36,15 +36,15 @@ import java.util.regex.Pattern; * compatibility needs. 
*/ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { + // for jdbc preparedStatement interface private String rawSql; private Object[] parameters; - - private ArrayList colData; + // for parameter binding + private long nativeStmtHandle = 0; + private String tableName; private ArrayList tableTags; private int tagValueLength; - - private String tableName; - private long nativeStmtHandle = 0; + private ArrayList colData; TSDBPreparedStatement(TSDBConnection connection, String sql) { super(connection); @@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat preprocessSql(); } - /* - * - */ - /** * Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by * the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in @@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public void setObject(int parameterIndex, Object x) throws SQLException { - if (isClosed()) { + if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - } - - if (parameterIndex < 1 && parameterIndex >= parameters.length) { + if (parameterIndex < 1 && parameterIndex >= parameters.length) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - } parameters[parameterIndex - 1] = x; } @@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - // TODO: throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - //TODO: throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } @Override @@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat /////////////////////////////////////////////////////////////////////// // NOTE: the following APIs are not JDBC compatible - // set the bind table name + // parameter binding private static class ColumnInfo { @SuppressWarnings("rawtypes") private ArrayList data; @@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat } } - public void setTableName(String name) { + public void setTableName(String name) throws SQLException { + if (this.tableName != null) { + this.columnDataExecuteBatch(); + this.columnDataClearBatchInternal(); + } this.tableName = name; } @@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void columnDataExecuteBatch() throws SQLException { TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); connector.executeBatch(this.nativeStmtHandle); - 
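
One behavioral change in the parameter-binding path deserves a callout: setTableName() is no longer a bare setter. If rows are still pending for the previous table, it executes and clears that batch before switching, so a single prepared statement can now walk several child tables; the new ParameterBindTest below exercises exactly this. A minimal sketch, assuming a super table weather(ts timestamp, f1 int) tags(t1 int) already exists in database test:

```java
import com.taosdata.jdbc.TSDBPreparedStatement;

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.ArrayList;

public class MultiTableBindSketch {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:TAOS://127.0.0.1:6030/test?user=root&password=taosdata";
        String sql = "insert into ? using weather tags(?) values(?, ?)";
        try (Connection conn = DriverManager.getConnection(url);
             TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
            long now = System.currentTimeMillis();
            for (int i = 0; i < 2; i++) {
                // switching tables flushes any batch still pending for the previous one
                pstmt.setTableName("t" + i);
                pstmt.setTagInt(0, i);

                ArrayList<Long> ts = new ArrayList<>();
                ArrayList<Integer> f1 = new ArrayList<>();
                for (int j = 0; j < 10; j++) {
                    ts.add(now + j);
                    f1.add(j);
                }
                pstmt.setTimestamp(0, ts);
                pstmt.setInt(1, f1);
                pstmt.columnDataAddBatch();
            }
            pstmt.columnDataExecuteBatch(); // flush the final table's batch
        }
    }
}
```

Note that close() now also clears any pending batch and releases the native statement handle, which is what makes the try-with-resources above safe.
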
this.columnDataClearBatch(); + this.columnDataClearBatchInternal(); } + @Deprecated public void columnDataClearBatch() { + columnDataClearBatchInternal(); + } + + private void columnDataClearBatchInternal() { int size = this.colData.size(); this.colData.clear(); - this.colData.addAll(Collections.nCopies(size, null)); this.tableName = null; // clear the table name } + public void columnDataCloseBatch() throws SQLException { TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector(); connector.closeBatch(this.nativeStmtHandle); @@ -978,4 +977,11 @@ this.nativeStmtHandle = 0L; this.tableName = null; } + + @Override + public void close() throws SQLException { + this.columnDataClearBatchInternal(); + this.columnDataCloseBatch(); + super.close(); + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java index d5bd1bde5eb3d73ebd419652ca1fbbe3485d95c5..2a0bea15702a79b3440f95771cf56b879a814626 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java @@ -1,5 +1,7 @@ package com.taosdata.jdbc.enums; +import java.util.Arrays; + public enum SchemalessProtocolType { UNKNOWN, LINE, @@ -7,4 +9,10 @@ JSON, ; + public static SchemalessProtocolType parse(String type) { + return Arrays.stream(SchemalessProtocolType.values()) + .filter(protocol -> type.equalsIgnoreCase(protocol.name())) + .findFirst().orElse(UNKNOWN); + } + } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java index 159714262e5386e00e74bb6154a20165a735c174..fa10a23634ec75182365d42ebfb79aff7b14b08f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java @@ -1,7 +1,7 @@ package com.taosdata.jdbc.enums; public enum SchemalessTimestampType { - + // Let the database decide NOT_CONFIGURED, HOURS, MINUTES, diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index d5985756ee1851407bf19a568657fa2127d0be43..36714893e3ca519dea07910a95d5ee1c1b6fb731 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -50,9 +50,13 @@ public class RestfulDriver extends AbstractDriver { String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName()); loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + ""; } catch (UnsupportedEncodingException e) { - e.printStackTrace(); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 encoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD)); } + int poolSize = Integer.valueOf(props.getProperty("httpPoolSize",
HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE)); + boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE)); + + HttpClientPoolUtil.init(poolSize, keepAlive); String result = HttpClientPoolUtil.execute(loginUrl); JSONObject jsonResult = JSON.parseObject(result); String status = jsonResult.getString("status"); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java index 99e46bc64f44f6326aec12734849cc5ef518c903..fc116b32c2a154c9479e4933d887ac7ddcedbe9f 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java @@ -5,12 +5,11 @@ import com.taosdata.jdbc.TSDBErrorNumbers; import org.apache.http.HeaderElement; import org.apache.http.HeaderElementIterator; import org.apache.http.HttpEntity; -import org.apache.http.NoHttpResponseException; import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.HttpRequestRetryHandler; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.*; import org.apache.http.client.protocol.HttpClientContext; +import org.apache.http.conn.ClientConnectionManager; import org.apache.http.conn.ConnectionKeepAliveStrategy; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; @@ -21,21 +20,20 @@ import org.apache.http.protocol.HTTP; import org.apache.http.protocol.HttpContext; import org.apache.http.util.EntityUtils; -import javax.net.ssl.SSLException; import java.io.IOException; -import java.io.InterruptedIOException; -import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; import java.sql.SQLException; +import java.util.concurrent.TimeUnit; public class HttpClientPoolUtil { private static final String DEFAULT_CONTENT_TYPE = "application/json"; private static final int DEFAULT_MAX_RETRY_COUNT = 5; - private static final int DEFAULT_MAX_TOTAL = 50; - private static final int DEFAULT_MAX_PER_ROUTE = 5; + public static final String DEFAULT_HTTP_KEEP_ALIVE = "true"; + public static final String DEFAULT_MAX_PER_ROUTE = "20"; private static final int DEFAULT_HTTP_KEEP_TIME = -1; + private static String isKeepAlive; private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> { HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE)); @@ -55,36 +53,39 @@ public class HttpClientPoolUtil { private static CloseableHttpClient httpClient; - static { - - PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); - connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL); - connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE); - - httpClient = HttpClients.custom() - .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY) - .setConnectionManager(connectionManager) - .setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT) - .build(); + public static void init(Integer connPoolSize, boolean keepAlive) { + if (httpClient == null) { + synchronized (HttpClientPoolUtil.class) { + if (httpClient == null) { + isKeepAlive = keepAlive ? 
HTTP.CONN_KEEP_ALIVE : HTTP.CONN_CLOSE; + PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); + connectionManager.setMaxTotal(connPoolSize * 10); + connectionManager.setDefaultMaxPerRoute(connPoolSize); + httpClient = HttpClients.custom() + .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY) + .setConnectionManager(connectionManager) + .setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT) + .build(); + } + } + } } /*** execute GET request ***/ public static String execute(String uri) throws SQLException { HttpEntity httpEntity = null; String responseBody = ""; - try { - HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); - HttpContext context = HttpClientContext.create(); - CloseableHttpResponse httpResponse = httpClient.execute(method, context); + HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME); + HttpContext context = HttpClientContext.create(); + + try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) { httpEntity = httpResponse.getEntity(); if (httpEntity != null) { responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } } catch (ClientProtocolException e) { - e.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); } catch (IOException exception) { - exception.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); } finally { if (httpEntity != null) { @@ -94,30 +95,27 @@ public class HttpClientPoolUtil { return responseBody; } - /*** execute POST request ***/ public static String execute(String uri, String data, String token) throws SQLException { + + HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME); + method.setHeader(HTTP.CONTENT_TYPE, "text/plain"); + method.setHeader(HTTP.CONN_DIRECTIVE, isKeepAlive); + method.setHeader("Authorization", "Taosd " + token); + method.setEntity(new StringEntity(data, StandardCharsets.UTF_8)); + HttpContext context = HttpClientContext.create(); + HttpEntity httpEntity = null; String responseBody = ""; - try { - HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME); - method.setHeader(HTTP.CONTENT_TYPE, "text/plain"); - method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE); - method.setHeader("Authorization", "Taosd " + token); - - method.setEntity(new StringEntity(data, StandardCharsets.UTF_8)); - HttpContext context = HttpClientContext.create(); - CloseableHttpResponse httpResponse = httpClient.execute(method, context); + try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) { httpEntity = httpResponse.getEntity(); if (httpEntity == null) { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_HTTP_ENTITY_IS_NULL, "httpEntity is null, sql: " + data); } responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8); } catch (ClientProtocolException e) { - e.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage()); } catch (IOException exception) { - exception.printStackTrace(); throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage()); } finally { if (httpEntity != null) { @@ -148,4 +146,12 @@ public class HttpClientPoolUtil { return method; } + + public static void 
reset() { + synchronized (HttpClientPoolUtil.class) { + ClientConnectionManager cm = httpClient.getConnectionManager(); + cm.closeExpiredConnections(); + cm.closeIdleConnections(100, TimeUnit.MILLISECONDS); + } + } } \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java index a427103770cff7f51355024688454824d7263c77..d4664f2678013b3de87bcd3f0dc24631be511ede 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java @@ -16,7 +16,6 @@ public class TaosInfo implements TaosInfoMBean { MBeanServer server = ManagementFactory.getPlatformMBeanServer(); ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo"); server.registerMBean(TaosInfo.getInstance(), name); - } catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) { e.printStackTrace(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java index e1c4bddb2812f658336c895249886f603681e632..6cd1ff7200962b7347969e0b8b10443083505912 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java @@ -49,14 +49,9 @@ public class Utils { try { return parseMicroSecTimestamp(timeStampStr); } catch (DateTimeParseException ee) { - try { - return parseNanoSecTimestamp(timeStampStr); - } catch (DateTimeParseException eee) { - eee.printStackTrace(); - } + return parseNanoSecTimestamp(timeStampStr); } } - return null; } private static LocalDateTime parseMilliSecTimestamp(String timeStampStr) throws DateTimeParseException { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java new file mode 100644 index 0000000000000000000000000000000000000000..46f201d1c0a525f52014d133e25fc0db4741050c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java @@ -0,0 +1,139 @@ +package com.taosdata.jdbc; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.stream.Collectors; + +public class ParameterBindTest { + + private static final String host = "127.0.0.1"; + private static final String stable = "weather"; + + private Connection conn; + private final Random random = new Random(System.currentTimeMillis()); + + @Test + public void test() { + // given + String[] tbnames = {"t1", "t2", "t3"}; + int rows = 10; + + // when + insertIntoTables(tbnames, 10); + + // then + assertRows(stable, tbnames.length * rows); + for (String t : tbnames) { + assertRows(t, rows); + } + } + + @Test + public void testMultiThreads() { + // given + String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}}; + int rows = 10; + + // when + List threads = Arrays.stream(tables).map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows))).collect(Collectors.toList()); + threads.forEach(Thread::start); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + // then + for 
(String[] table : tables) { + for (String t : table) { + assertRows(t, rows); + } + } + + } + + private void assertRows(String tbname, int rows) { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select count(*) from " + tbname); + while (rs.next()) { + int count = rs.getInt(1); + Assert.assertEquals(rows, count); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private void insertIntoTables(String[] tbnames, int rowsEachTable) { + long current = System.currentTimeMillis(); + String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)"; + try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) { + for (int i = 0; i < tbnames.length; i++) { + pstmt.setTableName(tbnames[i]); + pstmt.setTagInt(0, random.nextInt(100)); + pstmt.setTagInt(1, random.nextInt(100)); + + ArrayList timestampList = new ArrayList<>(); + for (int j = 0; j < rowsEachTable; j++) { + timestampList.add(current + i * 1000 + j); + } + pstmt.setTimestamp(0, timestampList); + + ArrayList f1List = new ArrayList<>(); + for (int j = 0; j < rowsEachTable; j++) { + f1List.add(random.nextInt(100)); + } + pstmt.setInt(1, f1List); + + ArrayList f2List = new ArrayList<>(); + for (int j = 0; j < rowsEachTable; j++) { + f2List.add(random.nextInt(100)); + } + pstmt.setInt(2, f2List); + + pstmt.columnDataAddBatch(); + } + + pstmt.columnDataExecuteBatch(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() { + String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + try { + conn = DriverManager.getConnection(url); + Statement stmt = conn.createStatement(); + stmt.execute("drop database if exists test_pd"); + stmt.execute("create database if not exists test_pd"); + stmt.execute("use test_pd"); + stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { +// Statement stmt = conn.createStatement(); +// stmt.execute("drop database if exists test_pd"); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java index c706704f67e75ce61f6f96def26c6895e8805a7a..fd4ac12ce40dc02f2b6ffbf91e33b0e0bd2398a9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java @@ -1,5 +1,7 @@ package com.taosdata.jdbc; +import com.alibaba.fastjson.JSONArray; +import com.alibaba.fastjson.JSONObject; import com.taosdata.jdbc.enums.SchemalessProtocolType; import com.taosdata.jdbc.enums.SchemalessTimestampType; import org.junit.After; @@ -10,10 +12,14 @@ import org.junit.Test; import java.sql.*; public class SchemalessInsertTest { - private String host = "127.0.0.1"; - private String dbname = "test_schemaless_insert"; + private final String dbname = "test_schemaless_insert"; private Connection conn; + /** + * schemaless insert compatible with influxdb + * + * @throws SQLException execute error + */ @Test public void schemalessInsert() throws SQLException { // given @@ -41,6 +47,11 @@ public class SchemalessInsertTest { statement.close(); } + /** + * telnet insert compatible with opentsdb + * + * @throws SQLException execute error + */ @Test 
public void telnetInsert() throws SQLException { // given @@ -71,6 +82,11 @@ public class SchemalessInsertTest { statement.close(); } + /** + * json insert compatible with opentsdb json format + * + * @throws SQLException execute error + */ @Test public void jsonInsert() throws SQLException { // given @@ -113,13 +129,15 @@ public class SchemalessInsertTest { while (rs.next()) { rowCnt++; } -// Assert.assertEquals(json.length, rowCnt); + + Assert.assertEquals(((JSONArray) JSONObject.parse(json)).size(), rowCnt); rs.close(); statement.close(); } @Before public void before() { + String host = "127.0.0.1"; final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; try { conn = DriverManager.getConnection(url); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java new file mode 100644 index 0000000000000000000000000000000000000000..30fc2fa76597c30b905db5c9d49815189d71aaa3 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java @@ -0,0 +1,57 @@ +package com.taosdata.jdbc.rs; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +public class HttpKeepAliveTest { + + private static final String host = "127.0.0.1"; + + @Test + public void test() throws SQLException { + //given + int multi = 4000; + AtomicInteger exceptionCount = new AtomicInteger(); + + //when + Properties props = new Properties(); + props.setProperty("httpKeepAlive", "false"); + props.setProperty("httpPoolSize", "20"); + Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", props); + + List threads = IntStream.range(0, multi).mapToObj(i -> new Thread( + () -> { + try (Statement stmt = connection.createStatement()) { + stmt.execute("insert into log.tb_not_exists values(now, 1)"); + stmt.execute("select last(*) from log.dn"); + } catch (SQLException throwables) { + exceptionCount.getAndIncrement(); + } + } + )).collect(Collectors.toList()); + + threads.forEach(Thread::start); + + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + //then + Assert.assertEquals(multi, exceptionCount.get()); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java index 693a8f8eb42a29db1d3dd5120dbcb632acc28bb4..a78284b7a2ecf1b43b96180fa9d819e89ecdc595 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java @@ -6,8 +6,7 @@ import java.sql.*; public class WasNullTest { - // private static final String host = "127.0.0.1"; - private static final String host = "master"; + private static final String host = "127.0.0.1"; private Connection conn; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java index cae33f18e7a04e443092d8e696bb32be9600a435..7ba1607fdd32a594bca22528dee48d902736c703 100644 --- 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java @@ -2,8 +2,6 @@ package com.taosdata.jdbc.utils; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.JSONObject; -import com.taosdata.jdbc.TSDBDriver; -import com.taosdata.jdbc.TSDBError; import org.junit.Test; import java.io.UnsupportedEncodingException; @@ -11,7 +9,6 @@ import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.sql.SQLException; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -20,18 +17,21 @@ public class HttpClientPoolUtilTest { String user = "root"; String password = "taosdata"; String host = "127.0.0.1"; - String dbname = "log"; +// String host = "master"; @Test - public void test() { + public void useLog() { // given - List threads = IntStream.range(0, 4000).mapToObj(i -> new Thread(() -> { - useDB(); -// try { -// TimeUnit.SECONDS.sleep(10); -// } catch (InterruptedException e) { -// e.printStackTrace(); -// } + int multi = 10; + + // when + List threads = IntStream.range(0, multi).mapToObj(i -> new Thread(() -> { + try { + String token = login(multi); + executeOneSql("use log", token); + } catch (SQLException | UnsupportedEncodingException e) { + e.printStackTrace(); + } })).collect(Collectors.toList()); threads.forEach(Thread::start); @@ -43,34 +43,62 @@ public class HttpClientPoolUtilTest { e.printStackTrace(); } } - } - private void useDB() { - try { - user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName()); - password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName()); - String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + user + "/" + password + ""; - String result = HttpClientPoolUtil.execute(loginUrl); - JSONObject jsonResult = JSON.parseObject(result); - String status = jsonResult.getString("status"); - String token = jsonResult.getString("desc"); - if (!status.equals("succ")) { - throw new SQLException(jsonResult.getString("desc")); + @Test + public void tableNotExist() { + // given + int multi = 20; + + // when + List threads = IntStream.range(0, multi * 25).mapToObj(i -> new Thread(() -> { + try { +// String token = "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"; + String token = login(multi); + executeOneSql("insert into log.tb_not_exist values(now, 1)", token); + executeOneSql("select last(*) from log.dn", token); + } catch (SQLException | UnsupportedEncodingException e) { + e.printStackTrace(); } + })).collect(Collectors.toList()); - String url = "http://" + host + ":6041/rest/sql"; - String sql = "use " + dbname; - result = HttpClientPoolUtil.execute(url, sql, token); + threads.forEach(Thread::start); - JSONObject resultJson = JSON.parseObject(result); - if (resultJson.getString("status").equals("error")) { - throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); + for (Thread thread : threads) { + try { + thread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); } - } catch (UnsupportedEncodingException | SQLException e) { - e.printStackTrace(); } } + private String login(int connPoolSize) throws SQLException, UnsupportedEncodingException { + user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName()); + password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName()); + String loginUrl = "http://" + host + ":" + 
6041 + "/rest/login/" + user + "/" + password + ""; + HttpClientPoolUtil.init(connPoolSize, false); + String result = HttpClientPoolUtil.execute(loginUrl); + JSONObject jsonResult = JSON.parseObject(result); + String status = jsonResult.getString("status"); + String token = jsonResult.getString("desc"); + if (!status.equals("succ")) { + throw new SQLException(jsonResult.getString("desc")); + } + return token; + } + + private boolean executeOneSql(String sql, String token) throws SQLException { + String url = "http://" + host + ":6041/rest/sql"; + String result = HttpClientPoolUtil.execute(url, sql, token); + JSONObject resultJson = JSON.parseObject(result); + if (resultJson.getString("status").equals("error")) { +// HttpClientPoolUtil.reset(); +// throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc")); + return false; + } + return true; + } + } \ No newline at end of file diff --git a/src/connector/jdbc/src/test/resources/commons-logging.properties b/src/connector/jdbc/src/test/resources/commons-logging.properties new file mode 100644 index 0000000000000000000000000000000000000000..ac435a2a1bd64ca9925948d486b453638cb8caac --- /dev/null +++ b/src/connector/jdbc/src/test/resources/commons-logging.properties @@ -0,0 +1,2 @@ +#org.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog + diff --git a/src/connector/jdbc/src/test/resources/simplelog.properties b/src/connector/jdbc/src/test/resources/simplelog.properties new file mode 100644 index 0000000000000000000000000000000000000000..abcc1ef6d56112c892377ca47453b65ed924a9a9 --- /dev/null +++ b/src/connector/jdbc/src/test/resources/simplelog.properties @@ -0,0 +1,5 @@ +org.apache.commons.logging.simplelog.defaultlog=TRACE +org.apache.commons.logging.simplelog.showlogname=true +org.apache.commons.logging.simplelog.showShortLogname=restful +org.apache.commons.logging.simplelog.showdatetime=true +org.apache.commons.logging.simplelog.dateTimeFormat=yyyy-mm-dd hh:MM:ss.SSS \ No newline at end of file diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 5ba2739c35b1f0aef61ba3e52ae5d2f3a901df77..3c395ec205a9c39b3c6e62532de536feef093544 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -12,6 +12,7 @@ const FieldTypes = require('./constants'); const errors = require('./error'); const TaosObjects = require('./taosobjects'); const { NULL_POINTER } = require('ref-napi'); +const { Console } = require('console'); module.exports = CTaosInterface; @@ -53,6 +54,18 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0 } return res; } +function convertTinyintUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readUIntLE(currOffset, 1); + res.push(d == FieldTypes.C_TINYINT_UNSIGNED_NULL ? 
null : d); + currOffset += nbytes; + } + return res; +} + function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -64,6 +77,18 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = } return res; } +function convertSmallintUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readUIntLE(currOffset, 2); + res.push(d == FieldTypes.C_SMALLINT_UNSIGNED_NULL ? null : d); + currOffset += nbytes; + } + return res; +} + function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -75,6 +100,19 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { } return res; } +function convertIntUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readUInt32LE(currOffset); + res.push(d == FieldTypes.C_INT_UNSIGNED_NULL ? null : d); + currOffset += nbytes; + } + return res; +} + + function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -86,6 +124,19 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) } return res; } +function convertBigintUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { + data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); + let res = []; + let currOffset = 0; + while (currOffset < data.length) { + let d = data.readUInt64LE(currOffset); + res.push(d == FieldTypes.C_BIGINT_UNSIGNED_NULL ? null : BigInt(d)); + currOffset += nbytes; + } + return res; +} + + function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) { data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset); let res = []; @@ -156,7 +207,11 @@ let convertFunctions = { [FieldTypes.C_DOUBLE]: convertDouble, [FieldTypes.C_BINARY]: convertBinary, [FieldTypes.C_TIMESTAMP]: convertTimestamp, - [FieldTypes.C_NCHAR]: convertNchar + [FieldTypes.C_NCHAR]: convertNchar, + [FieldTypes.C_TINYINT_UNSIGNED]: convertTinyintUnsigned, + [FieldTypes.C_SMALLINT_UNSIGNED]: convertSmallintUnsigned, + [FieldTypes.C_INT_UNSIGNED]: convertIntUnsigned, + [FieldTypes.C_BIGINT_UNSIGNED]: convertBigintUnsigned } // Define TaosField structure @@ -321,6 +376,7 @@ CTaosInterface.prototype.close = function close(connection) { CTaosInterface.prototype.query = function query(connection, sql) { return this.libtaos.taos_query(connection, ref.allocCString(sql)); } + CTaosInterface.prototype.affectedRows = function affectedRows(result) { return this.libtaos.taos_affected_rows(result); } @@ -413,6 +469,7 @@ CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, p this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param); return param; } + /** Asynchrnously fetches the next block of rows. 
Wraps callback and transfers a 4th argument to the cursor, the row data as blocks in javascript form * Note: This isn't a recursive function, in order to fetch all data either use the TDengine cursor object, TaosQuery object, or implement a recrusive * function yourself using the libtaos.taos_fetch_rows_a function diff --git a/src/connector/nodejs/nodetaos/constants.js b/src/connector/nodejs/nodetaos/constants.js index cd6a0c9fbaff51e7f0ecd3ab06907b7b1fb7dcb1..3a866315507371fdfc69efb6de550b7c21f660b7 100644 --- a/src/connector/nodejs/nodetaos/constants.js +++ b/src/connector/nodejs/nodetaos/constants.js @@ -36,13 +36,21 @@ module.exports = { C_BINARY : 8, C_TIMESTAMP : 9, C_NCHAR : 10, + C_TINYINT_UNSIGNED : 11, + C_SMALLINT_UNSIGNED : 12, + C_INT_UNSIGNED : 13, + C_BIGINT_UNSIGNED : 14, // NULL value definition // NOTE: These values should change according to C definition in tsdb.h C_BOOL_NULL : 2, C_TINYINT_NULL : -128, + C_TINYINT_UNSIGNED_NULL : 255, C_SMALLINT_NULL : -32768, + C_SMALLINT_UNSIGNED_NULL : 65535, C_INT_NULL : -2147483648, - C_BIGINT_NULL : -9223372036854775808, + C_INT_UNSIGNED_NULL : 4294967295, + C_BIGINT_NULL : -9223372036854775808n, + C_BIGINT_UNSIGNED_NULL : 18446744073709551615n, C_FLOAT_NULL : 2146435072, C_DOUBLE_NULL : -9223370937343148032, C_NCHAR_NULL : 4294967295, @@ -64,6 +72,10 @@ const typeCodesToName = { 8 : 'Binary', 9 : 'Timestamp', 10 : 'Nchar', + 11 : 'TINYINT_UNSIGNED', + 12 : 'SMALLINT_UNSIGNED', + 13 : 'INT_UNSIGNED', + 14 : 'BIGINT_UNSIGNED', } /** diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index 6a2c66100b3d1921b3ce8997e70d33f024e5c3f2..711db94b84fab40d8d1809a44c45b24a9ab5bafb 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -7,7 +7,7 @@ "test": "test" }, "scripts": { - "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js" + "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js && node test/testUnsignedType.js " }, "repository": { "type": "git", diff --git a/src/connector/nodejs/test/test.js b/src/connector/nodejs/test/test.js index caf05955da4c960ebedc872f400c17d18be767dd..06adf912a57bfa369b9567d0b5b3a1c8fb105ce8 100644 --- a/src/connector/nodejs/test/test.js +++ b/src/connector/nodejs/test/test.js @@ -90,7 +90,7 @@ c1.execute("create table if not exists td_connector_test.weather(ts timestamp, t c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)"); c1.execute("insert into t1(ts, temperature) values(now, 22.22)"); c1.execute("insert into t1(ts, humidity) values(now, 33)"); -c1.query('select * from test.t1', true).then(function (result) { +c1.query('select * from td_connector_test.t1', true).then(function (result) { result.pretty(); }); diff --git a/src/connector/nodejs/test/testUnsignedType.js b/src/connector/nodejs/test/testUnsignedType.js new file mode 100644 index 0000000000000000000000000000000000000000..82413afebad0b75116fe3ea46e50716843d87c84 --- /dev/null +++ b/src/connector/nodejs/test/testUnsignedType.js @@ -0,0 +1,26 @@ +const taos = require('../tdengine'); +var conn = taos.connect({ host: "127.0.0.1", user: "root", password: "taosdata", config: "/etc/taos", port: 10 }); +var c1 = conn.cursor(); +executeUpdate("create database nodedb;"); +executeUpdate("use nodedb;"); +executeUpdate("create table unsigntest(ts timestamp,ut tinyint unsigned,us smallint unsigned,ui int unsigned,ub bigint unsigned,bi bigint);"); +executeUpdate("insert into unsigntest values 
(now, 254,65534,4294967294,18446744073709551614,9223372036854775807);"); +executeUpdate("insert into unsigntest values (now, 0,0,0,0,-9223372036854775807);"); +executeQuery("select * from unsigntest;"); +executeUpdate("drop database nodedb;"); + + +function executeUpdate(sql) { + console.log(sql); + c1.execute(sql); +} +function executeQuery(sql) { + c1.execute(sql) + var data = c1.fetchall(); + // Latest query's Field metadata is stored in cursor.fields + console.log(c1.fields); + // Latest query's result data is stored in cursor.data, also returned by fetchall. + console.log(c1.data); +} +setTimeout(()=>conn.close(),2000); + diff --git a/src/connector/python/README.md b/src/connector/python/README.md index b5d841601f20fbad5bdc1464d5d83f512b25dfc4..679735131105739ae59940c29b51f57496a2057d 100644 --- a/src/connector/python/README.md +++ b/src/connector/python/README.md @@ -5,14 +5,27 @@ ## Install -```sh -git clone --depth 1 https://github.com/taosdata/TDengine.git -pip install ./TDengine/src/connector/python +You can use `pip` to install the connector from PyPI: + +```bash +pip install taospy +``` + +Or install directly from the git URL: + +```bash +pip install git+https://github.com/taosdata/taos-connector-python.git +``` + +If you have installed the TDengine server or client from prebuilt packages, you can also install the connector from the bundled path: + +```bash +pip install /usr/local/taos/connector/python ``` ## Source Code -[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python). +Source code for the [TDengine](https://github.com/taosdata/TDengine) connector for Python is hosted on [GitHub](https://github.com/taosdata/taos-connector-python). ## Examples diff --git a/src/connector/python/pyproject.toml b/src/connector/python/pyproject.toml index da61cccf49429251d49f2cba495e24e146244c85..69e3351712b647712a88d7067545ea12ed86506d 100644 --- a/src/connector/python/pyproject.toml +++ b/src/connector/python/pyproject.toml @@ -1,10 +1,13 @@ [tool.poetry] -name = "taos" -version = "2.1.1" +name = "taospy" +version = "2.1.2" description = "TDengine connector for python" authors = ["Taosdata Inc. 
"] license = "AGPL-3.0" readme = "README.md" +packages = [ + {include = "taos"} +] [tool.poetry.dependencies] python = "^2.7 || ^3.4" @@ -12,12 +15,12 @@ typing = "*" [tool.poetry.dev-dependencies] pytest = [ - { version = "^4.6", python = "^2.7" }, - { version = "^6.2", python = "^3.7" } + { version = "^4.6", python = ">=2.7,<3.0" }, + { version = "^6.2", python = ">=3.7,<4.0" } ] pdoc = { version = "^7.1.1", python = "^3.7" } mypy = { version = "^0.910", python = "^3.6" } -black = { version = "^21.7b0", python = "^3.6" } +black = [{ version = "^21.*", python = ">=3.6.2,<4.0" }] [build-system] requires = ["poetry-core>=1.0.0"] diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py index 2520984e78fad236227d9cf55c29ace92878d3bf..7ebfa8adef6a82c979ad0544a3eb11ccd351b760 100644 --- a/src/connector/python/taos/__init__.py +++ b/src/connector/python/taos/__init__.py @@ -442,18 +442,14 @@ from .statement import * from .subscription import * from .schemaless import * -try: - import importlib.metadata - - __version__ = importlib.metadata.version("taos") -except: - None +from taos._version import __version__ # Globals threadsafety = 0 paramstyle = "pyformat" __all__ = [ + "__version__", # functions "connect", "new_bind_param", diff --git a/src/connector/python/taos/_version.py b/src/connector/python/taos/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..f811561263c557cf534e90ff763373bccacb20b6 --- /dev/null +++ b/src/connector/python/taos/_version.py @@ -0,0 +1 @@ +__version__ = '2.1.2' diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py index 4365c7eabc509f95525078378ff76d46a884c075..37bc90d4c63fe3f75b12d46bb1bf535441869938 100644 --- a/src/connector/python/taos/cinterface.py +++ b/src/connector/python/taos/cinterface.py @@ -2,8 +2,9 @@ import ctypes import platform -import sys +import inspect from ctypes import * + try: from typing import Any except: @@ -14,6 +15,7 @@ from .bind import * from .field import * from .schemaless import * +_UNSUPPORTED = {} # stream callback stream_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p) @@ -47,10 +49,13 @@ def _load_taos(): "Darwin": _load_taos_darwin, "Windows": _load_taos_windows, } + pf = platform.system() + if load_func[pf] is None: + raise InterfaceError("unsupported platform: %s" % pf) try: - return load_func[platform.system()]() - except: - raise InterfaceError('unsupported platform or failed to load taos client library') + return load_func[pf]() + except Exception as err: + raise InterfaceError("unable to load taos C library: %s" % err) _libtaos = _load_taos() @@ -65,7 +70,6 @@ _libtaos.taos_consume.restype = ctypes.c_void_p _libtaos.taos_fetch_lengths.restype = ctypes.POINTER(ctypes.c_int) _libtaos.taos_free_result.restype = None _libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p) -_libtaos.taos_schemaless_insert.restype = ctypes.c_void_p try: _libtaos.taos_stmt_errstr.restype = c_char_p @@ -181,6 +185,7 @@ def taos_connect(host=None, user="root", password="taosdata", db=None, port=0): raise ConnectionError("connect to TDengine failed") return connection + _libtaos.taos_connect_auth.restype = c_void_p _libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16 @@ -236,6 +241,7 @@ def taos_connect_auth(host=None, user="root", auth="", db=None, port=0): raise ConnectionError("connect to TDengine failed") return connection + _libtaos.taos_query.restype = c_void_p _libtaos.taos_query.argtypes = 
c_void_p, c_char_p @@ -287,6 +293,7 @@ def taos_affected_rows(result): """The affected rows after running query""" return _libtaos.taos_affected_rows(result) + subscribe_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int) _libtaos.taos_subscribe.restype = c_void_p # _libtaos.taos_subscribe.argtypes = c_void_p, c_int, c_char_p, c_char_p, subscribe_callback_type, c_void_p, c_int @@ -317,7 +324,7 @@ def taos_subscribe(connection, restart, topic, sql, interval, callback=None, par _libtaos.taos_consume.restype = c_void_p -_libtaos.taos_consume.argstype = c_void_p, +_libtaos.taos_consume.argstype = (c_void_p,) def taos_consume(sub): @@ -503,13 +510,17 @@ def taos_stop_query(result): return _libtaos.taos_stop_query(result) -_libtaos.taos_load_table_info.restype = c_int -_libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p) +try: + _libtaos.taos_load_table_info.restype = c_int + _libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p) +except Exception as err: + _UNSUPPORTED["taos_load_table_info"] = err def taos_load_table_info(connection, tables): # type: (ctypes.c_void_p, str) -> None """Load table info""" + _check_if_supported() errno = _libtaos.taos_load_table_info(connection, c_char_p(tables.encode("utf-8"))) if errno != 0: msg = taos_errstr() @@ -562,12 +573,13 @@ def taos_select_db(connection, db): try: _libtaos.taos_open_stream.restype = c_void_p _libtaos.taos_open_stream.argstype = c_void_p, c_char_p, stream_callback_type, c_int64, c_void_p, Any -except: - pass +except Exception as err: + _UNSUPPORTED["taos_open_stream"] = err def taos_open_stream(connection, sql, callback, stime=0, param=None, callback2=None): # type: (ctypes.c_void_p, str, stream_callback_type, c_int64, c_void_p, c_void_p) -> ctypes.pointer + _check_if_supported() if callback2 != None: callback2 = stream_callback2_type(callback2) """Open a stream""" @@ -600,6 +612,7 @@ def taos_stmt_init(connection): """ return c_void_p(_libtaos.taos_stmt_init(connection)) + _libtaos.taos_stmt_prepare.restype = c_int _libtaos.taos_stmt_prepare.argstype = (c_void_p, c_char_p, c_int) @@ -618,6 +631,7 @@ def taos_stmt_prepare(stmt, sql): _libtaos.taos_stmt_close.restype = c_int _libtaos.taos_stmt_close.argstype = (c_void_p,) + def taos_stmt_close(stmt): # type: (ctypes.c_void_p) -> None """Close a statement query @@ -627,17 +641,12 @@ def taos_stmt_close(stmt): if res != 0: raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) -try: - _libtaos.taos_stmt_errstr.restype = c_char_p - _libtaos.taos_stmt_errstr.argstype = (c_void_p,) -except AttributeError: - print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info()) try: _libtaos.taos_stmt_errstr.restype = c_char_p _libtaos.taos_stmt_errstr.argstype = (c_void_p,) -except AttributeError: - print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info()) +except Exception as err: + _UNSUPPORTED["taos_stmt_errstr"] = err def taos_stmt_errstr(stmt): @@ -645,16 +654,17 @@ def taos_stmt_errstr(stmt): """Get error message from statement query @stmt: c_void_p TAOS_STMT* """ + _check_if_supported() err = c_char_p(_libtaos.taos_stmt_errstr(stmt)) if err: return err.value.decode("utf-8") + try: _libtaos.taos_stmt_set_tbname.restype = c_int _libtaos.taos_stmt_set_tbname.argstype = (c_void_p, c_char_p) -except AttributeError: - print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname" % taos_get_client_info()) - +except Exception as err: + _UNSUPPORTED["taos_stmt_set_tbname"] = err def 
taos_stmt_set_tbname(stmt, name): @@ -662,15 +672,17 @@ def taos_stmt_set_tbname(stmt, name): """Set table name of a statement query if exists. @stmt: c_void_p TAOS_STMT* """ + _check_if_supported() res = _libtaos.taos_stmt_set_tbname(stmt, c_char_p(name.encode("utf-8"))) if res != 0: raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) + try: _libtaos.taos_stmt_set_tbname_tags.restype = c_int _libtaos.taos_stmt_set_tbname_tags.argstype = (c_void_p, c_char_p, c_void_p) -except AttributeError: - print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname_tags" % taos_get_client_info()) +except Exception as err: + _UNSUPPORTED["taos_stmt_set_tbname_tags"] = err def taos_stmt_set_tbname_tags(stmt, name, tags): @@ -678,11 +690,13 @@ def taos_stmt_set_tbname_tags(stmt, name, tags): """Set table name with tags bind params. @stmt: c_void_p TAOS_STMT* """ + _check_if_supported() res = _libtaos.taos_stmt_set_tbname_tags(stmt, ctypes.c_char_p(name.encode("utf-8")), tags) if res != 0: raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) + _libtaos.taos_stmt_is_insert.restype = c_int _libtaos.taos_stmt_is_insert.argstype = (c_void_p, POINTER(c_int)) @@ -702,6 +716,7 @@ def taos_stmt_is_insert(stmt): _libtaos.taos_stmt_num_params.restype = c_int _libtaos.taos_stmt_num_params.argstype = (c_void_p, POINTER(c_int)) + def taos_stmt_num_params(stmt): # type: (ctypes.c_void_p) -> int """Params number of the current statement query. @@ -713,6 +728,7 @@ def taos_stmt_num_params(stmt): raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) return num_params.value + _libtaos.taos_stmt_bind_param.restype = c_int _libtaos.taos_stmt_bind_param.argstype = (c_void_p, c_void_p) @@ -729,12 +745,12 @@ def taos_stmt_bind_param(stmt, bind): if res != 0: raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) + try: _libtaos.taos_stmt_bind_param_batch.restype = c_int _libtaos.taos_stmt_bind_param_batch.argstype = (c_void_p, c_void_p) -except AttributeError: - print("WARNING: libtaos(%s) does not support taos_stmt_bind_param_batch" % taos_get_client_info()) - +except Exception as err: + _UNSUPPORTED["taos_stmt_bind_param_batch"] = err def taos_stmt_bind_param_batch(stmt, bind): @@ -745,15 +761,17 @@ def taos_stmt_bind_param_batch(stmt, bind): """ # ptr = ctypes.cast(bind, POINTER(TaosMultiBind)) # ptr = pointer(bind) + _check_if_supported() res = _libtaos.taos_stmt_bind_param_batch(stmt, bind) if res != 0: raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) + try: _libtaos.taos_stmt_bind_single_param_batch.restype = c_int _libtaos.taos_stmt_bind_single_param_batch.argstype = (c_void_p, c_void_p, c_int) -except AttributeError: - print("WARNING: libtaos(%s) does not support taos_stmt_bind_single_param_batch" % taos_get_client_info()) +except Exception as err: + _UNSUPPORTED["taos_stmt_bind_single_param_batch"] = err def taos_stmt_bind_single_param_batch(stmt, bind, col): @@ -763,6 +781,7 @@ def taos_stmt_bind_single_param_batch(stmt, bind, col): @bind: TAOS_MULTI_BIND* @col: column index """ + _check_if_supported() res = _libtaos.taos_stmt_bind_single_param_batch(stmt, bind, col) if res != 0: raise StatementError(msg=taos_stmt_errstr(stmt), errno=res) @@ -810,14 +829,17 @@ def taos_stmt_use_result(stmt): raise StatementError(taos_stmt_errstr(stmt)) return result + try: _libtaos.taos_schemaless_insert.restype = c_void_p _libtaos.taos_schemaless_insert.argstype = c_void_p, c_void_p, c_int, c_int, c_int -except AttributeError: - print("WARNING: libtaos(%s) does not support 
taos_schemaless_insert" % taos_get_client_info()) +except Exception as err: + _UNSUPPORTED["taos_schemaless_insert"] = err + def taos_schemaless_insert(connection, lines, protocol, precision): # type: (c_void_p, list[str] | tuple(str), SmlProtocol, SmlPrecision) -> int + _check_if_supported() num_of_lines = len(lines) lines = (c_char_p(line.encode("utf-8")) for line in lines) lines_type = ctypes.c_char_p * num_of_lines @@ -833,6 +855,18 @@ def taos_schemaless_insert(connection, lines, protocol, precision): taos_free_result(res) return affected_rows + +def _check_if_supported(): + func = inspect.stack()[1][3] + if func in _UNSUPPORTED: + raise InterfaceError("C function %s is not supported in v%s: %s" % (func, taos_get_client_info(), _UNSUPPORTED[func])) + + +def unsupported_methods(): + for m, e in range(_UNSUPPORTED): + print("unsupported %s: %s", m, e) + + class CTaosInterface(object): def __init__(self, config=None): """ diff --git a/src/inc/taos.h b/src/inc/taos.h index 4afec942ff991ce1009cb8c54113562f93f9c92d..6cd62d3177d2490c5c89bf910e258c956c2f69fc 100644 --- a/src/inc/taos.h +++ b/src/inc/taos.h @@ -175,11 +175,13 @@ DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); DLL_EXPORT void taos_stop_query(TAOS_RES *res); DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); +DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); -DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); - DLL_EXPORT int* taos_fetch_lengths(TAOS_RES *res); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT void taos_reset_current_db(TAOS *taos); + // TAOS_RES *taos_list_tables(TAOS *mysql, const char *wild); // TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild); @@ -192,7 +194,6 @@ DLL_EXPORT int taos_errno(TAOS_RES *tres); DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param); DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param); -//DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param); typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code); DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval); diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index dfdd016bb66244394310e4c34e689c3428d8914b..c6d587fe1a296bc40ab804cdef160b70da273fd8 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -400,7 +400,7 @@ typedef struct SColIndex { int16_t colId; // column id int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag uint16_t flag; // denote if it is a tag or a normal column - char name[TSDB_COL_NAME_LEN + TSDB_DB_NAME_LEN + 1]; + char name[TSDB_COL_NAME_LEN + TSDB_TABLE_NAME_LEN + 1]; } SColIndex; typedef struct SColumnFilterInfo { diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c index 38abb423cfd2c0329dad24244a798f0617b4cbb6..b3a07b257cbfdd639d6834e7981fb10e89e43512 100644 --- a/src/kit/shell/src/shellImport.c +++ b/src/kit/shell/src/shellImport.c @@ -25,7 +25,7 @@ static char **shellSQLFiles = NULL; static int32_t shellSQLFileNum = 0; -static char shellTablesSQLFile[TSDB_FILENAME_LEN] = {0}; +static char 
shellTablesSQLFile[4096] = {0}; typedef struct { pthread_t threadID; diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 884127fad8ceb3e3f85dd0350fd5723a270df251..c2b8479f19d778f030101a8d9fb5ac537ca0475c 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -10969,7 +10969,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } int64_t ntables = 0; - uint64_t tableFrom; + uint64_t tableFrom = 0; if (stbInfo) { if (stbInfo->iface != SML_IFACE) { diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt index 5b48374e8f7d54bef4d199ff9398aaf6a74b257e..1daff0c75956072e02f8439acac2850b9315235a 100644 --- a/src/kit/taosdump/CMakeLists.txt +++ b/src/kit/taosdump/CMakeLists.txt @@ -3,6 +3,7 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc) +INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(. SRC) @@ -61,12 +62,22 @@ ENDIF () MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") +LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64) + IF (TD_LINUX) ADD_EXECUTABLE(taosdump ${SRC}) IF (TD_SOMODE_STATIC) - TARGET_LINK_LIBRARIES(taosdump taos_static cJson) + IF (TD_AVRO_SUPPORT) + TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson) + ELSE () + TARGET_LINK_LIBRARIES(taosdump taos_static) + ENDIF() ELSE () - TARGET_LINK_LIBRARIES(taosdump taos cJson) + IF (TD_AVRO_SUPPORT) + TARGET_LINK_LIBRARIES(taosdump taos avro jansson) + ELSE () + TARGET_LINK_LIBRARIES(taosdump taos) + ENDIF () ENDIF () ENDIF () @@ -74,8 +85,8 @@ IF (TD_DARWIN) # missing for macosx # ADD_EXECUTABLE(taosdump ${SRC}) # IF (TD_SOMODE_STATIC) - # TARGET_LINK_LIBRARIES(taosdump taos_static cJson) + # TARGET_LINK_LIBRARIES(taosdump taos_static jansson) # ELSE () - # TARGET_LINK_LIBRARIES(taosdump taos cJson) + # TARGET_LINK_LIBRARIES(taosdump taos jansson) # ENDIF () ENDIF () diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 69ec2968218a9e5b2ca34551c60b6c44256298d2..d552e6123fd6d3e496006a0cb79f662d5c139cc1 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -28,15 +28,24 @@ #include "tsdb.h" #include "tutil.h" -#define AVRO_SUPPORT 0 -#if AVRO_SUPPORT == 1 +static char **g_tsDumpInSqlFiles = NULL; +static char g_tsCharset[63] = {0}; + +#ifdef AVRO_SUPPORT #include -#endif +#include + +static char **g_tsDumpInAvroFiles = NULL; + +static void print_json_aux(json_t *element, int indent); + +#endif /* AVRO_SUPPORT */ #define TSDB_SUPPORT_NANOSECOND 1 #define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255 +#define MAX_PATH_LEN 4096 // max path length on linux is 4095 #define COMMAND_SIZE 65536 #define MAX_RECORDS_PER_REQ 32766 //#define DEFAULT_DUMP_FILE "taosdump.sql" @@ -46,8 +55,6 @@ static int converStringToReadable(char *str, int size, char *buf, int bufsize); static int convertNCharToReadable(char *str, int size, char *buf, int bufsize); -static void dumpCharset(FILE *fp); -static void loadFileCharset(FILE *fp, char *fcharset); typedef struct { short bytes; @@ -64,7 +71,12 @@ typedef struct { #define performancePrint(fmt, ...) \ do { if (g_args.performance_print) \ - fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0) + fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0) + +#define warnPrint(fmt, ...) 
\ + do { fprintf(stderr, "\033[33m"); \ + fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \ + fprintf(stderr, "\033[0m"); } while(0) #define errorPrint(fmt, ...) \ do { fprintf(stderr, "\033[31m"); \ @@ -208,14 +220,13 @@ typedef struct { typedef struct { pthread_t threadID; int32_t threadIndex; - int32_t totalThreads; char dbName[TSDB_DB_NAME_LEN]; char stbName[TSDB_TABLE_NAME_LEN]; int precision; TAOS *taos; int64_t rowsOfDumpOut; - int64_t tablesOfDumpOut; - int64_t tableFrom; + int64_t count; + int64_t from; } threadInfo; typedef struct { @@ -225,6 +236,44 @@ typedef struct { int32_t totalDatabasesOfDumpOut; } resultStatistics; +#ifdef AVRO_SUPPORT + +enum enAvro_Codec { + AVRO_CODEC_START = 0, + AVRO_CODEC_NULL = AVRO_CODEC_START, + AVRO_CODEC_DEFLATE, + AVRO_CODEC_SNAPPY, + AVRO_CODEC_LZMA, + AVRO_CODEC_UNKNOWN = 255 +}; + +char *g_avro_codec[] = { + "null", + "deflate", + "snappy", + "lzma", + "unknown" +}; + +/* avro section begin */ +#define RECORD_NAME_LEN 64 +#define FIELD_NAME_LEN 64 +#define TYPE_NAME_LEN 16 + +typedef struct FieldStruct_S { + char name[FIELD_NAME_LEN]; + char type[TYPE_NAME_LEN]; +} FieldStruct; + +typedef struct RecordSchema_S { + char name[RECORD_NAME_LEN]; + char *fields; + int num_fields; +} RecordSchema; + +/* avro section end */ +#endif + static int64_t g_totalDumpOutRows = 0; SDbInfo **g_dbInfos = NULL; @@ -276,14 +325,17 @@ static struct argp_option options[] = { // dump format options {"schemaonly", 's', 0, 0, "Only dump schema.", 2}, {"without-property", 'N', 0, 0, "Dump schema without properties.", 2}, - {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2}, - {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4}, - {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5}, - {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3}, - {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3}, - {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3}, - {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3}, - {"debug", 'g', 0, 0, "Print debug info.", 8}, +#ifdef AVRO_SUPPORT + {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3}, + {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4}, +#endif + {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8}, + {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9}, + {"data-batch", 'B', "DATA_BATCH", 0, "Number of data points per insert statement. Max value is 32766. Default is 1.", 10}, + {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. 
Default is 65480.", 10}, + {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10}, + {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10}, + {"debug", 'g', 0, 0, "Print debug info.", 15}, {0} }; @@ -310,7 +362,10 @@ typedef struct arguments { // dump format option bool schemaonly; bool with_property; +#ifdef AVRO_SUPPORT bool avro; + int avro_codec; +#endif int64_t start_time; char humanStartTime[HUMAN_TIME_LEN]; int64_t end_time; @@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0}; static FILE *g_fpOfResult = NULL; static int g_numOfCores = 1; -static int dumpOut(); -static int dumpIn(); -static void dumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, - FILE *fp); -static int dumpCreateTableClause(TableDef *tableDes, int numOfCols, - FILE *fp, char* dbName); -static int getTableDes( - char* dbName, char *table, - TableDef *stableDes, bool isSuperTable); -static int64_t dumpTableData(FILE *fp, char *tbName, - char* dbName, - int precision, - char *jsonAvroSchema); -static int checkParam(); -static void freeDbInfos(); - struct arguments g_args = { // connection option NULL, @@ -381,7 +420,10 @@ struct arguments g_args = { // dump format option false, // schemaonly true, // with_property - false, // avro format +#ifdef AVRO_SUPPORT + false, // avro + AVRO_CODEC_SNAPPY, // avro_codec +#endif -INT64_MAX + 1, // start_time {0}, // humanStartTime INT64_MAX, // end_time @@ -392,7 +434,7 @@ struct arguments g_args = { 1, // table_batch false, // allow_sys // other options - 5, // thread_num + 8, // thread_num 0, // abort NULL, // arg_list 0, // arg_list_len @@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { } break; +#ifdef AVRO_SUPPORT + case 'v': + g_args.avro = true; + break; + + case 'd': + for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) { + if (0 == strcmp(arg, g_avro_codec[i])) { + g_args.avro_codec = i; + break; + } + } + break; +#endif + case 'r': g_args.resultFile = arg; break; @@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'N': g_args.with_property = false; break; - case 'v': - g_args.avro = true; - break; case 'S': // parse time here. 
break; @@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { g_args.abort = 1; break; case ARGP_KEY_ARG: - g_args.arg_list = &state->argv[state->next - 1]; - g_args.arg_list_len = state->argc - state->next + 1; + if (strlen(state->argv[state->next - 1])) { + g_args.arg_list = &state->argv[state->next - 1]; + g_args.arg_list_len = state->argc - state->next + 1; + } state->next = state->argc; break; @@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause( for (; counter < numOfCols; counter++) { if (counter != count_temp) { - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { //pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note); if (tableDes->cols[counter].var_value) { pstr += sprintf(pstr, ", \'%s\'", @@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause( pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value); } } else { - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { //pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note); if (tableDes->cols[counter].var_value) { pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value); @@ -1050,1902 +1106,2784 @@ static void dumpCreateMTableClause( free(tmpBuf); } -static int convertTbDesToAvroSchema( - char *dbName, char *tbName, TableDef *tableDes, int colCount, - char **avroSchema) +static int64_t getNtbCountOfStb(char *dbName, char *stbName) { - errorPrint("%s() LN%d TODO: covert table schema to avro schema\n", - __func__, __LINE__); - // { - // "namesapce": "database name", - // "type": "record", - // "name": "table name", - // "fields": [ - // { - // "name": "col0 name", - // "type": "long" - // }, - // { - // "name": "col1 name", - // "type": ["int", "null"] - // }, - // { - // "name": "col2 name", - // "type": ["float", "null"] - // }, - // ... 
- // { - // "name": "coln name", - // "type": ["string", "null"] - // } - // ] - // } - *avroSchema = (char *)calloc(1, - 17 + TSDB_DB_NAME_LEN /* dbname section */ - + 17 /* type: record */ - + 11 + TSDB_TABLE_NAME_LEN /* tbname section */ - + 10 /* fields section */ - + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */ - if (*avroSchema == NULL) { - errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); + TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, + dbName, g_args.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); return -1; } - char *pstr = *avroSchema; - pstr += sprintf(pstr, - "{\"namespace\": \"%s\", \"type\": \"record\", \"name\": \"%s\", \"fields\": [", - dbName, tbName); - for (int i = 0; i < colCount; i ++) { - if (0 == i) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": \"%s\"", - tableDes->cols[i].field, "long"); - } else { - if (strcasecmp(tableDes->cols[i].type, "binary") == 0 || - strcasecmp(tableDes->cols[i].type, "nchar") == 0) { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]", - tableDes->cols[i].field, "string"); - } else { - pstr += sprintf(pstr, - "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]", - tableDes->cols[i].field, tableDes->cols[i].type); - } - } - if ((i != (colCount -1)) - && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) { - pstr += sprintf(pstr, "},"); - } else { - pstr += sprintf(pstr, "}"); - break; - } + int64_t count = 0; + + char command[COMMAND_SIZE]; + + sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); + + TAOS_RES *res = taos_query(taos, command); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; } - pstr += sprintf(pstr, "]}"); + TAOS_ROW row = NULL; - debugPrint("%s() LN%d, avroSchema: %s\n", __func__, __LINE__, *avroSchema); + if ((row = taos_fetch_row(res)) != NULL) { + count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; + } - return 0; + taos_close(taos); + return count; } -static int64_t dumpNormalTable( - char *dbName, - char *stable, - char *tbName, - int precision, - FILE *fp - ) { +static int getTableDes( + TAOS *taos, + char* dbName, char *table, + TableDef *tableDes, bool isSuperTable) { + TAOS_ROW row = NULL; + TAOS_RES* res = NULL; int colCount = 0; - TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) - + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table - colCount = getTableDes(dbName, tbName, tableDes, false); - - if (colCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - tbName); - free(tableDes); - return -1; - } + char sqlstr[COMMAND_SIZE]; + sprintf(sqlstr, "describe %s.%s;", dbName, table); - // create child-table using super-table - dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp); - } else { // dump table definition - colCount = getTableDes(dbName, tbName, tableDes, false); + res = taos_query(taos, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + return -1; + } - if (colCount < 0) { - errorPrint("%s() LN%d, failed to get table[%s] schema\n", - __func__, - __LINE__, - tbName); - free(tableDes); - return -1; - } + TAOS_FIELD *fields = taos_fetch_fields(res); - // create normal-table or super-table - dumpCreateTableClause(tableDes, colCount, fp, dbName); + tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); + while ((row = taos_fetch_row(res)) != NULL) { + tstrncpy(tableDes->cols[colCount].field, + (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], + min(TSDB_COL_NAME_LEN, + fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); + tstrncpy(tableDes->cols[colCount].type, + (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], + min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1)); + tableDes->cols[colCount].length = + *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); + tstrncpy(tableDes->cols[colCount].note, + (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], + min(COL_NOTE_LEN, + fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); + colCount++; } - char *jsonAvroSchema = NULL; - if (g_args.avro) { - if (0 != convertTbDesToAvroSchema( - dbName, tbName, tableDes, colCount, &jsonAvroSchema)) { - errorPrint("%s() LN%d, convertTbDesToAvroSchema failed\n", - __func__, - __LINE__); - freeTbDes(tableDes); - return -1; - } - } + taos_free_result(res); + res = NULL; - int64_t ret = 0; - if (!g_args.schemaonly) { - ret = dumpTableData(fp, tbName, dbName, precision, - jsonAvroSchema); + if (isSuperTable) { + return colCount; } - tfree(jsonAvroSchema); - freeTbDes(tableDes); - return ret; -} + // if child-table have tag, using select tagName from table to get tagValue + for (int i = 0 ; i < colCount; i++) { + if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; -static int64_t dumpNormalTableBelongStb( - SDbInfo *dbInfo, char *stbName, char *ntbName) -{ - int64_t count = 0; + sprintf(sqlstr, "select %s from %s.%s", + tableDes->cols[i].field, 
dbName, table); - char tmpBuf[4096] = {0}; - FILE *fp = NULL; + res = taos_query(taos, sqlstr); + code = taos_errno(res); + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; + } - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.sql", - g_args.outpath, dbInfo->name, ntbName); - } else { - sprintf(tmpBuf, "%s.%s.sql", - dbInfo->name, ntbName); - } + fields = taos_fetch_fields(res); - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; - } + row = taos_fetch_row(res); + if (NULL == row) { + errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", + __func__, __LINE__, sqlstr, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; + } - count = dumpNormalTable( - dbInfo->name, - stbName, - ntbName, - getPrecisionByString(dbInfo->precision), - fp); + if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) { + sprintf(tableDes->cols[i].note, "%s", "NUL"); + sprintf(tableDes->cols[i].value, "%s", "NULL"); + taos_free_result(res); + res = NULL; + continue; + } - fclose(fp); - return count; -} + int32_t* length = taos_fetch_lengths(res); -static int64_t dumpNormalTableWithoutStb(SDbInfo *dbInfo, char *ntbName) -{ - int64_t count = 0; - - char tmpBuf[4096] = {0}; - FILE *fp = NULL; + switch (fields[0].type) { + case TSDB_DATA_TYPE_BOOL: + sprintf(tableDes->cols[i].value, "%d", + ((((int32_t)(*((char *) + row[TSDB_SHOW_TABLES_NAME_INDEX])))==1) + ?1:0)); + break; + case TSDB_DATA_TYPE_TINYINT: + sprintf(tableDes->cols[i].value, "%d", + *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_SMALLINT: + sprintf(tableDes->cols[i].value, "%d", + *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_INT: + sprintf(tableDes->cols[i].value, "%d", + *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_BIGINT: + sprintf(tableDes->cols[i].value, "%" PRId64 "", + *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_FLOAT: + sprintf(tableDes->cols[i].value, "%f", + GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_DOUBLE: + sprintf(tableDes->cols[i].value, "%f", + GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); + break; + case TSDB_DATA_TYPE_BINARY: + memset(tableDes->cols[i].value, 0, + sizeof(tableDes->cols[i].value)); + int len = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + // FIXME for long value + if (len < (COL_VALUEBUF_LEN - 2)) { + converStringToReadable( + (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + length[0], + tableDes->cols[i].value, + len); + } else { + tableDes->cols[i].var_value = calloc(1, len * 2); + if (tableDes->cols[i].var_value == NULL) { + errorPrint("%s() LN%d, memory alalocation failed!\n", + __func__, __LINE__); + taos_free_result(res); + return -1; + } + converStringToReadable((char *)row[0], + length[0], + (char *)(tableDes->cols[i].var_value), len); + } + break; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.sql", - g_args.outpath, dbInfo->name, ntbName); - } else { - sprintf(tmpBuf, "%s.%s.sql", - dbInfo->name, ntbName); - } + case TSDB_DATA_TYPE_NCHAR: + memset(tableDes->cols[i].value, 0, + sizeof(tableDes->cols[i].note)); + int nlen = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + if (nlen < (COL_VALUEBUF_LEN-2)) { + char tbuf[COL_VALUEBUF_LEN-2]; // need 
reserve 2 bytes for ' ' + convertNCharToReadable( + (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + length[0], tbuf, COL_VALUEBUF_LEN-2); + sprintf(tableDes->cols[i].value, "%s", tbuf); + } else { + tableDes->cols[i].var_value = calloc(1, nlen * 4); + if (tableDes->cols[i].var_value == NULL) { + errorPrint("%s() LN%d, memory alalocation failed!\n", + __func__, __LINE__); + taos_free_result(res); + return -1; + } + converStringToReadable( + (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], + length[0], + (char *)(tableDes->cols[i].var_value), nlen); + } + break; + case TSDB_DATA_TYPE_TIMESTAMP: + sprintf(tableDes->cols[i].value, "%" PRId64 "", + *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); +#if 0 + if (!g_args.mysqlFlag) { + sprintf(tableDes->cols[i].value, "%" PRId64 "", + *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, + (int)(ts % 1000)); + } +#endif + break; + default: + break; + } - fp = fopen(tmpBuf, "w"); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return -1; + taos_free_result(res); } - count = dumpNormalTable( - dbInfo->name, - NULL, - ntbName, - getPrecisionByString(dbInfo->precision), - fp); - - fclose(fp); - return count; + return colCount; } -static void *dumpNtbOfDb(void *arg) { - threadInfo *pThreadInfo = (threadInfo *)arg; - - debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom); - debugPrint("dump table count = \t%"PRId64"\n", - pThreadInfo->tablesOfDumpOut); +static int dumpCreateTableClause(TableDef *tableDes, int numOfCols, + FILE *fp, char* dbName) { + int counter = 0; + int count_temp = 0; + char sqlstr[COMMAND_SIZE]; - FILE *fp = NULL; - char tmpBuf[4096] = {0}; + char* pstr = sqlstr; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%d.sql", - g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex); - } else { - sprintf(tmpBuf, "%s.%d.sql", - pThreadInfo->dbName, pThreadInfo->threadIndex); - } + pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", + dbName, tableDes->name); - fp = fopen(tmpBuf, "w"); + for (; counter < numOfCols; counter++) { + if (tableDes->cols[counter].note[0] != '\0') break; - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return NULL; - } + if (counter == 0) { + pstr += sprintf(pstr, " (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } - int64_t count; - for (int64_t i = 0; i < pThreadInfo->tablesOfDumpOut; i++) { - debugPrint("[%d] No.\t%"PRId64" table name: %s\n", - pThreadInfo->threadIndex, i, - ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name); - count = dumpNormalTable( - pThreadInfo->dbName, - ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->stable, - ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name, - pThreadInfo->precision, - fp); - if (count < 0) { - break; + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); } } - fclose(fp); - return NULL; -} - -static void *dumpNormalTablesOfStb(void *arg) { - threadInfo *pThreadInfo = 
(threadInfo *)arg; - - debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom); - debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->tablesOfDumpOut); - - char command[COMMAND_SIZE]; + count_temp = counter; - sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", - pThreadInfo->dbName, pThreadInfo->stbName, - pThreadInfo->tablesOfDumpOut, pThreadInfo->tableFrom); + for (; counter < numOfCols; counter++) { + if (counter == count_temp) { + pstr += sprintf(pstr, ") TAGS (%s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } else { + pstr += sprintf(pstr, ", %s %s", + tableDes->cols[counter].field, tableDes->cols[counter].type); + } - TAOS_RES *res = taos_query(pThreadInfo->taos, command); - int32_t code = taos_errno(res); - if (code) { - errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - return NULL; + if (0 == strcasecmp(tableDes->cols[counter].type, "binary") + || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) { + pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + } } - FILE *fp = NULL; - char tmpBuf[4096] = {0}; + pstr += sprintf(pstr, ");"); - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/%s.%s.%d.sql", - g_args.outpath, - pThreadInfo->dbName, - pThreadInfo->stbName, - pThreadInfo->threadIndex); - } else { - sprintf(tmpBuf, "%s.%s.%d.sql", - pThreadInfo->dbName, - pThreadInfo->stbName, - pThreadInfo->threadIndex); - } + debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr); + return fprintf(fp, "%s\n\n", sqlstr); +} - fp = fopen(tmpBuf, "w"); +static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp) +{ + uint64_t sizeOfTableDes = + (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS); - if (fp == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, tmpBuf); - return NULL; + TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes); + if (NULL == tableDes) { + errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", + __func__, __LINE__, sizeOfTableDes); + exit(-1); } - TAOS_ROW row = NULL; - int64_t i = 0; - int64_t count; - while((row = taos_fetch_row(res)) != NULL) { - debugPrint("[%d] sub table %"PRId64": name: %s\n", - pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); + int colCount = getTableDes(taos, dbInfo->name, + stbName, tableDes, true); - count = dumpNormalTable( - pThreadInfo->dbName, - pThreadInfo->stbName, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], - pThreadInfo->precision, - fp); - if (count < 0) { - break; - } + if (colCount < 0) { + free(tableDes); + errorPrint("%s() LN%d, failed to get stable[%s] schema\n", + __func__, __LINE__, stbName); + exit(-1); } - fclose(fp); - return NULL; + dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name); + free(tableDes); + + return 0; } -static int64_t dumpNtbOfDbByThreads( - SDbInfo *dbInfo, - int64_t ntbCount) +static int64_t dumpCreateSTableClauseOfDb( + SDbInfo *dbInfo, FILE *fp) { - if (ntbCount <= 0) { + TAOS *taos = taos_connect(g_args.host, + g_args.user, g_args.password, dbInfo->name, g_args.port); + if (NULL == taos) { + errorPrint( + "Failed to connect to TDengine server %s by specified database %s\n", + g_args.host, dbInfo->name); return 0; } - int threads = g_args.thread_num; + TAOS_ROW row; + char command[COMMAND_SIZE] = {0}; - int64_t a = ntbCount / threads; - if (a < 1) { - threads = ntbCount; - a = 1; - } - - 
assert(threads); - int64_t b = ntbCount % threads; - - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - assert(pids); - assert(infos); - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->taos = taos_connect( - g_args.host, - g_args.user, - g_args.password, - dbInfo->name, - g_args.port - ); - if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", - __func__, - __LINE__, - taos_errstr(NULL)); - free(pids); - free(infos); - - return -1; - } - - pThreadInfo->threadIndex = i; - pThreadInfo->tablesOfDumpOut = (i<b)?a+1:a; - pThreadInfo->tableFrom = (i==0)?0: - ((threadInfo *)(infos + i - 1))->tableFrom + - ((threadInfo *)(infos + i - 1))->tablesOfDumpOut; - strcpy(pThreadInfo->dbName, dbInfo->name); - pThreadInfo->precision = getPrecisionByString(dbInfo->precision); - - pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo); - } - - for (int64_t i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - taos_close(pThreadInfo->taos); - } - - free(pids); - free(infos); - - return 0; -} -static int64_t getNtbCountOfStb(char *dbName, char *stbName) -{ - TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, - dbName, g_args.port); - if (taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - return -1; - } - - int64_t count = 0; - - char command[COMMAND_SIZE]; - - sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName); - - TAOS_RES *res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>. 
reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; + if (wordexp(fptr, &full_path, 0) != 0) { + errorPrint("illegal file name: %s\n", fptr); + return NULL; } - TAOS_ROW row = NULL; + char *fname = full_path.we_wordv[0]; - if ((row = taos_fetch_row(res)) != NULL) { - count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX]; + FILE *f = NULL; + if ((fname) && (strlen(fname) > 0)) { + f = fopen(fname, "r"); + if (f == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, fname); + } } - taos_close(taos); - return count; + wordfree(&full_path); + return f; } -static int64_t dumpNtbOfStbByThreads( - SDbInfo *dbInfo, char *stbName) +static uint64_t getFilesNum(char *ext) { - int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName); - - if (ntbCount <= 0) { - return 0; - } - - int threads = g_args.thread_num; + uint64_t count = 0; - int64_t a = ntbCount / threads; - if (a < 1) { - threads = ntbCount; - a = 1; - } + int namelen, extlen; + struct dirent *pDirent; + DIR *pDir; - assert(threads); - int64_t b = ntbCount % threads; + extlen = strlen(ext); - pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); - threadInfo *infos = calloc(1, threads * sizeof(threadInfo)); - assert(pids); - assert(infos); + bool isSql = (0 == strcmp(ext, "sql")); - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - pThreadInfo->taos = taos_connect( - g_args.host, - g_args.user, - g_args.password, - dbInfo->name, - g_args.port - ); - if (NULL == pThreadInfo->taos) { - errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n", - __func__, - __LINE__, - taos_errstr(NULL)); - free(pids); - free(infos); + pDir = opendir(g_args.inpath); + if (pDir != NULL) { + while ((pDirent = readdir(pDir)) != NULL) { + namelen = strlen (pDirent->d_name); - return -1; + if (namelen > extlen) { + if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) { + if (isSql) { + if (0 == strcmp(pDirent->d_name, "dbs.sql")) { + continue; + } + } + verbosePrint("%s found\n", pDirent->d_name); + count ++; + } + } } - - pThreadInfo->threadIndex = i; - pThreadInfo->tablesOfDumpOut = (itableFrom = (i==0)?0: - ((threadInfo *)(infos + i - 1))->tableFrom + - ((threadInfo *)(infos + i - 1))->tablesOfDumpOut; - strcpy(pThreadInfo->dbName, dbInfo->name); - pThreadInfo->precision = getPrecisionByString(dbInfo->precision); - - strcpy(pThreadInfo->stbName, stbName); - pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo); - } - - for (int64_t i = 0; i < threads; i++) { - pthread_join(pids[i], NULL); - } - - int64_t records = 0; - for (int64_t i = 0; i < threads; i++) { - threadInfo *pThreadInfo = infos + i; - records += pThreadInfo->rowsOfDumpOut; - taos_close(pThreadInfo->taos); + closedir (pDir); } - free(pids); - free(infos); - - return records; + debugPrint("%"PRId64" .%s files found!\n", count, ext); + return count; } -static int dumpStableClasuse(SDbInfo *dbInfo, char *stbName, FILE *fp) +static void freeFileList(char **fileList, int64_t count) { - uint64_t sizeOfTableDes = - (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS); - - TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes); - if (NULL == tableDes) { - errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", - __func__, __LINE__, sizeOfTableDes); - exit(-1); - } - - int colCount = getTableDes(dbInfo->name, - stbName, tableDes, true); - - if (colCount < 0) { - free(tableDes); - errorPrint("%s() LN%d, failed to get 
stable[%s] schema\n", - __func__, __LINE__, stbName); - exit(-1); + for (int64_t i = 0; i < count; i++) { + tfree(fileList[i]); } - - dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name); - free(tableDes); - - return 0; + tfree(fileList); } -static int64_t dumpCreateSTableClauseOfDb( - SDbInfo *dbInfo, FILE *fp) +static void createDumpinList(char *ext, int64_t count) { - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbInfo->name, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbInfo->name); - return 0; - } - - TAOS_ROW row; - char command[COMMAND_SIZE] = {0}; - - sprintf(command, "SHOW %s.STABLES", dbInfo->name); + bool isSql = (0 == strcmp(ext, "sql")); - TAOS_RES* res = taos_query(taos, command); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - exit(-1); - } + if (isSql) { + g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *)); + assert(g_tsDumpInSqlFiles); - int64_t superTblCnt = 0; - while ((row = taos_fetch_row(res)) != NULL) { - if (0 == dumpStableClasuse(dbInfo, row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) { - superTblCnt ++; + for (int64_t i = 0; i < count; i++) { + g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN); + assert(g_tsDumpInSqlFiles[i]); } } +#ifdef AVRO_SUPPORT + else { + g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *)); + assert(g_tsDumpInAvroFiles); - taos_free_result(res); + for (int64_t i = 0; i < count; i++) { + g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN); + assert(g_tsDumpInAvroFiles[i]); + } - fprintf(g_fpOfResult, - "# super table counter: %"PRId64"\n", - superTblCnt); - g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + } +#endif - taos_close(taos); + int namelen, extlen; + struct dirent *pDirent; + DIR *pDir; + + extlen = strlen(ext); + + count = 0; + pDir = opendir(g_args.inpath); + if (pDir != NULL) { + while ((pDirent = readdir(pDir)) != NULL) { + namelen = strlen (pDirent->d_name); + + if (namelen > extlen) { + if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) { + verbosePrint("%s found\n", pDirent->d_name); + if (isSql) { + if (0 == strcmp(pDirent->d_name, "dbs.sql")) { + continue; + } + strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN); + } +#ifdef AVRO_SUPPORT + else { + strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN); + } +#endif + } + } + } + closedir (pDir); + } - return superTblCnt; + debugPrint("%"PRId64" .%s files filled to list!\n", count, ext); } -static int64_t dumpNTablesOfDb(SDbInfo *dbInfo) +#ifdef AVRO_SUPPORT + +static int convertTbDesToJson( + char *dbName, char *tbName, TableDef *tableDes, int colCount, + char **jsonSchema) { - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbInfo->name, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbInfo->name); - return 0; + // { + // "type": "record", + // "name": "dbname.tbname", + // "fields": [ + // { + // "name": "col0 name", + // "type": "long" + // }, + // { + // "name": "col1 name", + // "type": "int" + // }, + // { + // "name": "col2 name", + // "type": "float" + // }, + // { + // "name": "col3 name", + // "type": "boolean" + // }, + // ... 
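One caveat worth noting in createDumpinList() above: strncpy(dst, src, MAX_FILE_NAME_LEN) leaves dst without a terminating NUL whenever the source name is at least MAX_FILE_NAME_LEN bytes long. A defensive variant of the copy (the length constant here is a stand-in, not taosdump's actual definition):

```c
// Bounded copy that always NUL-terminates, unlike bare strncpy().
// MAX_FILE_NAME_LEN is an assumed placeholder value.
#include <stdio.h>
#include <string.h>

#define MAX_FILE_NAME_LEN 64

static void copyFileName(char *dst, const char *src) {
    strncpy(dst, src, MAX_FILE_NAME_LEN - 1);
    dst[MAX_FILE_NAME_LEN - 1] = '\0';   // guarantee termination
}

int main(void) {
    char name[MAX_FILE_NAME_LEN];
    copyFileName(name, "power.meters.avro");
    printf("%s\n", name);
    return 0;
}
```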
+ // { + // "name": "coln name", + // "type": "string" + // } + // ] + // } + *jsonSchema = (char *)calloc(1, + 17 + TSDB_DB_NAME_LEN /* dbname section */ + + 17 /* type: record */ + + 11 + TSDB_TABLE_NAME_LEN /* tbname section */ + + 10 /* fields section */ + + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */ + if (*jsonSchema == NULL) { + errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__); + return -1; } - char command[COMMAND_SIZE]; - TAOS_RES *result; - int32_t code; + char *pstr = *jsonSchema; + pstr += sprintf(pstr, + "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [", + dbName, tbName); + for (int i = 0; i < colCount; i ++) { + if (0 == i) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "long"); + } else { + if (strcasecmp(tableDes->cols[i].type, "binary") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "string"); + } else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "bytes"); + } else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "boolean"); + } else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "int"); + } else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "int"); + } else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "long"); + } else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, "long"); + } else { + pstr += sprintf(pstr, + "{\"name\": \"%s\", \"type\": \"%s\"", + tableDes->cols[i].field, + strtolower(tableDes->cols[i].type, tableDes->cols[i].type)); + } + } + if ((i != (colCount -1)) + && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) { + pstr += sprintf(pstr, "},"); + } else { + pstr += sprintf(pstr, "}"); + break; + } + } - sprintf(command, "USE %s", dbInfo->name); - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - errorPrint("invalid database %s, reason: %s\n", - dbInfo->name, taos_errstr(result)); - taos_close(taos); - return 0; + pstr += sprintf(pstr, "]}"); + + debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema); + + return 0; +} + +static void print_json_indent(int indent) { + int i; + for (i = 0; i < indent; i++) { + putchar(' '); } +} - sprintf(command, "SHOW TABLES"); - result = taos_query(taos, command); - code = taos_errno(result); - if (code != 0) { - errorPrint("Failed to show %s\'s tables, reason: %s\n", - dbInfo->name, taos_errstr(result)); - taos_close(taos); - return 0; +const char *json_plural(size_t count) { return count == 1 ? 
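The mapping above emits a record schema whose fullname is "dbname.tbname"; Avro treats everything before the last dot as the namespace, which is what dumpInOneAvroFile() later recovers through avro_schema_namespace() to choose the target database. A minimal sketch that validates a schema of this shape with avro-c (table and column names are invented examples):

```c
// Parse a convertTbDesToJson()-style schema with avro-c.
// Build with: cc demo.c -lavro
#include <avro.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *json =
        "{\"type\": \"record\", \"name\": \"power.meters\", \"fields\": ["
        "{\"name\": \"ts\", \"type\": \"long\"},"
        "{\"name\": \"current\", \"type\": \"float\"},"
        "{\"name\": \"location\", \"type\": \"string\"}]}";

    avro_schema_t schema;
    if (avro_schema_from_json_length(json, strlen(json), &schema)) {
        fprintf(stderr, "parse failed: %s\n", avro_strerror());
        return 1;
    }
    // "power" is the namespace (database), "meters" the record name (table).
    printf("schema ok, name: %s\n", avro_schema_name(schema));
    avro_schema_decref(schema);
    return 0;
}
```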
"" : "s"; } + +static void print_json_object(json_t *element, int indent) { + size_t size; + const char *key; + json_t *value; + + print_json_indent(indent); + size = json_object_size(element); + + printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size)); + json_object_foreach(element, key, value) { + print_json_indent(indent + 2); + printf("JSON Key: \"%s\"\n", key); + print_json_aux(value, indent + 2); } +} - g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo)); +static void print_json_array(json_t *element, int indent) { + size_t i; + size_t size = json_array_size(element); + print_json_indent(indent); - TAOS_ROW row; - int64_t count = 0; - while(NULL != (row = taos_fetch_row(result))) { - debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n", - __func__, __LINE__, - count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - tstrncpy(((TableInfo *)(g_tablesList + count))->name, - (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN); - char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX]; - if (stbName) { - tstrncpy(((TableInfo *)(g_tablesList + count))->stable, - (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN); - ((TableInfo *)(g_tablesList + count))->belongStb = true; - } - count ++; + printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size)); + for (i = 0; i < size; i++) { + print_json_aux(json_array_get(element, i), indent + 2); } - taos_close(taos); +} - int64_t records = dumpNtbOfDbByThreads(dbInfo, count); +static void print_json_string(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON String: \"%s\"\n", json_string_value(element)); +} - free(g_tablesList); - g_tablesList = NULL; +static void print_json_integer(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element)); +} - return records; +static void print_json_real(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON Real: %f\n", json_real_value(element)); } -static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp) +static void print_json_true(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON True\n"); +} + +static void print_json_false(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON False\n"); +} + +static void print_json_null(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON Null\n"); +} + +static void print_json_aux(json_t *element, int indent) { - dumpCreateDbClause(dbInfo, g_args.with_property, fp); + switch(json_typeof(element)) { + case JSON_OBJECT: + print_json_object(element, indent); + break; - fprintf(g_fpOfResult, "\n#### database: %s\n", - dbInfo->name); - g_resultStatistics.totalDatabasesOfDumpOut++; + case JSON_ARRAY: + print_json_array(element, indent); + break; - dumpCreateSTableClauseOfDb(dbInfo, fp); + case JSON_STRING: + print_json_string(element, indent); + break; - return dumpNTablesOfDb(dbInfo); -} + case JSON_INTEGER: + print_json_integer(element, indent); + break; -static int dumpOut() { - TAOS *taos = NULL; - TAOS_RES *result = NULL; + case JSON_REAL: + print_json_real(element, indent); + break; - TAOS_ROW row; - FILE *fp = NULL; - int32_t count = 0; + case JSON_TRUE: + print_json_true(element, indent); + break; - char tmpBuf[4096] = {0}; - if (g_args.outpath[0] != 0) { - sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); - } else { - sprintf(tmpBuf, "dbs.sql"); - } + 
case JSON_FALSE:
+            print_json_false(element, indent);
+            break;
-    fp = fopen(tmpBuf, "w");
-    if (fp == NULL) {
-        errorPrint("%s() LN%d, failed to open file %s\n",
-                __func__, __LINE__, tmpBuf);
-        return -1;
+        case JSON_NULL:
+            print_json_null(element, indent);
+            break;
+
+        default:
+            fprintf(stderr, "unrecognized JSON type %d\n", json_typeof(element));
     }
+}
-    g_args.dumpDbCount = getDumpDbCount();
-    debugPrint("%s() LN%d, dump db count: %d\n",
-            __func__, __LINE__, g_args.dumpDbCount);
+static void print_json(json_t *root) { print_json_aux(root, 0); }
-    if (0 == g_args.dumpDbCount) {
-        errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
-        fclose(fp);
-        return -1;
+static json_t *load_json(char *jsonbuf)
+{
+    json_t *root;
+    json_error_t error;
+
+    root = json_loads(jsonbuf, 0, &error);
+
+    if (root) {
+        return root;
+    } else {
+        fprintf(stderr, "json error on line %d: %s\n", error.line, error.text);
+        return NULL;
     }
+}
-    g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
-    if (g_dbInfos == NULL) {
-        errorPrint("%s() LN%d, failed to allocate memory\n",
+static RecordSchema *parse_json_to_recordschema(json_t *element)
+{
+    RecordSchema *recordSchema = malloc(sizeof(RecordSchema));
+    assert(recordSchema);
+
+    if (JSON_OBJECT != json_typeof(element)) {
+        fprintf(stderr, "%s() LN%d, json passed is not an object\n",
                 __func__, __LINE__);
-        goto _exit_failure;
+        return NULL;
     }
-    char command[COMMAND_SIZE];
+    const char *key;
+    json_t *value;
+
+    json_object_foreach(element, key, value) {
+        if (0 == strcmp(key, "name")) {
+            tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1);
+        } else if (0 == strcmp(key, "fields")) {
+            if (JSON_ARRAY == json_typeof(value)) {
+
+                size_t i;
+                size_t size = json_array_size(value);
+
+                verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n",
+                        __func__, __LINE__,
+                        (long long)size, json_plural(size));
+
+                recordSchema->num_fields = size;
+                recordSchema->fields = malloc(sizeof(FieldStruct) * size);
+                assert(recordSchema->fields);
+
+                for (i = 0; i < size; i++) {
+                    FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
+                    json_t *arr_element = json_array_get(value, i);
+                    const char *ele_key;
+                    json_t *ele_value;
+
+                    json_object_foreach(arr_element, ele_key, ele_value) {
+                        if (0 == strcmp(ele_key, "name")) {
+                            tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1);
+                        } else if (0 == strcmp(ele_key, "type")) {
+                            if (JSON_STRING == json_typeof(ele_value)) {
+                                tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1);
+                            } else if (JSON_OBJECT == json_typeof(ele_value)) {
+                                const char *obj_key;
+                                json_t *obj_value;
+
+                                json_object_foreach(ele_value, obj_key, obj_value) {
+                                    if (0 == strcmp(obj_key, "type")) {
+                                        if (JSON_STRING == json_typeof(obj_value)) {
+                                            tstrncpy(field->type,
+                                                    json_string_value(obj_value), TYPE_NAME_LEN-1);
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            } else {
+                fprintf(stderr, "%s() LN%d, fields is not an array\n",
+                        __func__, __LINE__);
+                return NULL;
+            }
-    /* Connect to server */
-    taos = taos_connect(g_args.host, g_args.user, g_args.password,
-            NULL, g_args.port);
-    if (taos == NULL) {
-        errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
-        goto _exit_failure;
+            break;
+        }
     }
-    /* --------------------------------- Main Code -------------------------------- */
-    /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
-    /* */
-    dumpCharset(fp);
+    return recordSchema;
+}
-    sprintf(command, "show 
databases"); - result = taos_query(taos, command); - int32_t code = taos_errno(result); +static void freeRecordSchema(RecordSchema *recordSchema) +{ + if (recordSchema) { + if (recordSchema->fields) { + free(recordSchema->fields); + } + free(recordSchema); + } +} - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, command, taos_errstr(result)); - goto _exit_failure; +static int64_t writeResultToAvro( + char *avroFilename, + char *jsonSchema, + TAOS_RES *res) +{ + avro_schema_t schema; + if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) { + errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n", + __func__, __LINE__, jsonSchema, avro_strerror()); + exit(EXIT_FAILURE); } - TAOS_FIELD *fields = taos_fetch_fields(result); + json_t *json_root = load_json(jsonSchema); + debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__); - while ((row = taos_fetch_row(result)) != NULL) { - // sys database name : 'log', but subsequent version changed to 'log' - if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) - && (!g_args.allow_sys)) { - continue; + RecordSchema *recordSchema; + if (json_root) { + if (g_args.debug_print || g_args.verbose_print) { + print_json(json_root); } - if (g_args.databases) { // input multi dbs - if (inDatabasesSeq( - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) { - continue; - } - } else if (!g_args.all_databases) { // only input one db - if (strncasecmp(g_args.arg_list[0], - (char *)row[TSDB_SHOW_DB_NAME_INDEX], - fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) - continue; + recordSchema = parse_json_to_recordschema(json_root); + if (NULL == recordSchema) { + fprintf(stderr, "Failed to parse json to recordschema\n"); + exit(EXIT_FAILURE); } - g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); - if (g_dbInfos[count] == NULL) { - errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", - __func__, __LINE__, (uint64_t)sizeof(SDbInfo)); - goto _exit_failure; - } + json_decref(json_root); + } else { + errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema); + exit(EXIT_FAILURE); + } - okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]); - tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], - min(TSDB_DB_NAME_LEN, - fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1)); - if (g_args.with_property) { - g_dbInfos[count]->ntables = - *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - g_dbInfos[count]->vgroups = - *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - g_dbInfos[count]->replica = - *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); - g_dbInfos[count]->quorum = - *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - g_dbInfos[count]->days = - *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - - tstrncpy(g_dbInfos[count]->keeplist, - (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1)); - //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); - //g_dbInfos[count]->daysToKeep1; - //g_dbInfos[count]->daysToKeep2; - g_dbInfos[count]->cache = - *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - g_dbInfos[count]->blocks = - *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - g_dbInfos[count]->minrows = - *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - g_dbInfos[count]->maxrows = - *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - g_dbInfos[count]->wallevel = - *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - 
g_dbInfos[count]->fsync = - *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - g_dbInfos[count]->comp = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - g_dbInfos[count]->cachelast = - (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - - tstrncpy(g_dbInfos[count]->precision, - (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - DB_PRECISION_LEN); - g_dbInfos[count]->update = - *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); - } - count++; + avro_file_writer_t db; - if (g_args.databases) { - if (count > g_args.dumpDbCount) - break; - } else if (!g_args.all_databases) { - if (count >= 1) - break; - } + int rval = avro_file_writer_create_with_codec + (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0); + if (rval) { + errorPrint("There was an error creating %s. reason: %s\n", + avroFilename, avro_strerror()); + exit(EXIT_FAILURE); } - if (count == 0) { - errorPrint("%d databases valid to dump\n", count); - goto _exit_failure; - } + TAOS_ROW row = NULL; - taos_close(taos); + int numFields = taos_field_count(res); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(res); - if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases - for (int i = 0; i < count; i++) { - int64_t records = 0; - records = dumpWholeDatabase(g_dbInfos[i], fp); - if (records >= 0) { - okPrint("Database %s dumped\n", g_dbInfos[i]->name); - g_totalDumpOutRows += records; - } - } - } else { - if (1 == g_args.arg_list_len) { - int64_t records = dumpWholeDatabase(g_dbInfos[0], fp); - if (records >= 0) { - okPrint("Database %s dumped\n", g_dbInfos[0]->name); - g_totalDumpOutRows += records; - } - } else { - dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp); - } + avro_value_iface_t *wface = + avro_generic_class_from_schema(schema); - int superTblCnt = 0 ; - for (int i = 1; g_args.arg_list[i]; i++) { - TableRecordInfo tableRecordInfo; + avro_value_t record; + avro_generic_value_new(wface, &record); - if (getTableRecordInfo(g_dbInfos[0]->name, - g_args.arg_list[i], - &tableRecordInfo) < 0) { - errorPrint("input the invalid table %s\n", - g_args.arg_list[i]); + int64_t count = 0; + while ((row = taos_fetch_row(res)) != NULL) { + avro_value_t value; + + for (int col = 0; col < numFields; col++) { + if (0 != avro_value_get_by_name( + &record, fields[col].name, &value, NULL)) { + errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) 
failed", + __func__, __LINE__, fields[col].name); continue; } - int64_t records = 0; - if (tableRecordInfo.isStb) { // dump all table of this stable - int ret = dumpStableClasuse( - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - fp); - if (ret >= 0) { - superTblCnt++; - records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]); - } - } else if (tableRecordInfo.belongStb){ - dumpStableClasuse( - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - fp); - records = dumpNormalTableBelongStb( - g_dbInfos[0], - tableRecordInfo.tableRecord.stable, - g_args.arg_list[i]); - } else { - records = dumpNormalTableWithoutStb(g_dbInfos[0], g_args.arg_list[i]); - } + int len; + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_BOOL_NULL); + } else { + avro_value_set_boolean(&value, + ((((int32_t)(*((char *)row[col])))==1)?1:0)); + } + break; - if (records >= 0) { - okPrint("table: %s dumped\n", g_args.arg_list[i]); - g_totalDumpOutRows += records; + case TSDB_DATA_TYPE_TINYINT: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_TINYINT_NULL); + } else { + avro_value_set_int(&value, *((int8_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_SMALLINT: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_SMALLINT_NULL); + } else { + avro_value_set_int(&value, *((int16_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_INT: + if (NULL == row[col]) { + avro_value_set_int(&value, TSDB_DATA_INT_NULL); + } else { + avro_value_set_int(&value, *((int32_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_BIGINT: + if (NULL == row[col]) { + avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL); + } else { + avro_value_set_long(&value, *((int64_t *)row[col])); + } + break; + + case TSDB_DATA_TYPE_FLOAT: + if (NULL == row[col]) { + avro_value_set_float(&value, TSDB_DATA_FLOAT_NULL); + } else { + avro_value_set_float(&value, GET_FLOAT_VAL(row[col])); + } + break; + + case TSDB_DATA_TYPE_DOUBLE: + if (NULL == row[col]) { + avro_value_set_double(&value, TSDB_DATA_DOUBLE_NULL); + } else { + avro_value_set_double(&value, GET_DOUBLE_VAL(row[col])); + } + break; + + case TSDB_DATA_TYPE_BINARY: + if (NULL == row[col]) { + avro_value_set_string(&value, + (char *)NULL); + } else { + avro_value_set_string(&value, (char *)row[col]); + } + break; + + case TSDB_DATA_TYPE_NCHAR: + if (NULL == row[col]) { + avro_value_set_bytes(&value, + (void*)NULL,0); + } else { + len = strlen((char*)row[col]); + avro_value_set_bytes(&value, (void*)(row[col]),len); + } + break; + + case TSDB_DATA_TYPE_TIMESTAMP: + if (NULL == row[col]) { + avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL); + } else { + avro_value_set_long(&value, *((int64_t *)row[col])); + } + break; + + default: + break; } } + + if (0 != avro_file_writer_append_value(db, &record)) { + errorPrint("%s() LN%d, Unable to write record to file. 
Message: %s\n", + __func__, __LINE__, + avro_strerror()); + } else { + count ++; + } } - /* Close the handle and return */ - fclose(fp); - taos_free_result(result); - freeDbInfos(); - fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows); - return 0; + avro_value_decref(&record); + avro_value_iface_decref(wface); + freeRecordSchema(recordSchema); + avro_file_writer_close(db); + avro_schema_decref(schema); -_exit_failure: - fclose(fp); - taos_close(taos); - taos_free_result(result); - freeDbInfos(); - errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows); - return -1; + return count; } -static int getTableDes( - char* dbName, char *table, - TableDef *tableDes, bool isSuperTable) { - TAOS_ROW row = NULL; - TAOS_RES* res = NULL; - int colCount = 0; +void freeBindArray(char *bindArray, int onlyCol) +{ + TAOS_BIND *bind; - TAOS *taos = taos_connect(g_args.host, - g_args.user, g_args.password, dbName, g_args.port); - if (NULL == taos) { - errorPrint( - "Failed to connect to TDengine server %s by specified database %s\n", - g_args.host, dbName); + for (int j = 0; j < onlyCol; j++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j)); + if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type) + && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) { + tfree(bind->buffer); + } + } +} + +static int dumpInOneAvroFile(char* fcharset, + char* encode, char *avroFilepath) +{ + debugPrint("avroFilepath: %s\n", avroFilepath); + + avro_file_reader_t reader; + + if(avro_file_reader(avroFilepath, &reader)) { + fprintf(stderr, "Unable to open avro file %s: %s\n", + avroFilepath, avro_strerror()); return -1; } - char sqlstr[COMMAND_SIZE]; - sprintf(sqlstr, "describe %s.%s;", dbName, table); + int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4; + char *jsonbuf = calloc(1, buf_len); + assert(jsonbuf); - res = taos_query(taos, sqlstr); - int32_t code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); + avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);; + + avro_schema_t schema; + schema = avro_file_reader_get_writer_schema(reader); + avro_schema_to_json(schema, jsonwriter); + + if (0 == strlen(jsonbuf)) { + errorPrint("Failed to parse avro file: %s schema. 
reason: %s\n", + avroFilepath, avro_strerror()); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); return -1; } + debugPrint("Schema:\n %s\n", jsonbuf); - TAOS_FIELD *fields = taos_fetch_fields(res); + json_t *json_root = load_json(jsonbuf); + debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__); + if (g_args.debug_print) { + print_json(json_root); + } - tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN); - while ((row = taos_fetch_row(res)) != NULL) { - tstrncpy(tableDes->cols[colCount].field, - (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], - min(TSDB_COL_NAME_LEN + 1, - fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1)); - tstrncpy(tableDes->cols[colCount].type, - (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1)); - tableDes->cols[colCount].length = - *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); - tstrncpy(tableDes->cols[colCount].note, - (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], - min(COL_NOTE_LEN, - fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1)); - colCount++; + const char *namespace = avro_schema_namespace((const avro_schema_t)schema); + debugPrint("Namespace: %s\n", namespace); + + TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password, + namespace, g_args.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + return -1; } - taos_free_result(res); - res = NULL; + TAOS_STMT *stmt = taos_stmt_init(taos); + if (NULL == stmt) { + taos_close(taos); + errorPrint("%s() LN%d, stmt init failed! reason: %s\n", + __func__, __LINE__, taos_errstr(NULL)); + return -1; + } - if (isSuperTable) { - return colCount; + RecordSchema *recordSchema = parse_json_to_recordschema(json_root); + if (NULL == recordSchema) { + errorPrint("Failed to parse json to recordschema. reason: %s\n", + avro_strerror()); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; } + json_decref(json_root); - // if child-table have tag, using select tagName from table to get tagValue - for (int i = 0 ; i < colCount; i++) { - if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; + TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) + + sizeof(ColDes) * TSDB_MAX_COLUMNS); - sprintf(sqlstr, "select %s from %s.%s", - tableDes->cols[i].field, dbName, table); + int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false); - res = taos_query(taos, sqlstr); - code = taos_errno(res); - if (code != 0) { - errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } + if (allColCount < 0) { + errorPrint("%s() LN%d, failed to get table[%s] schema\n", + __func__, + __LINE__, + recordSchema->name); + free(tableDes); + freeRecordSchema(recordSchema); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; + } - fields = taos_fetch_fields(res); + char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN); + assert(stmtBuffer); + char *pstr = stmtBuffer; + pstr += sprintf(pstr, "INSERT INTO ? 
VALUES(?"); - row = taos_fetch_row(res); - if (NULL == row) { - errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n", - __func__, __LINE__, sqlstr, taos_errstr(res)); - taos_free_result(res); - taos_close(taos); - return -1; - } + int onlyCol = 1; // at least timestamp + for (int col = 1; col < allColCount; col++) { + if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue; + pstr += sprintf(pstr, ",?"); + onlyCol ++; + } + pstr += sprintf(pstr, ")"); - if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) { - sprintf(tableDes->cols[i].note, "%s", "NUL"); - sprintf(tableDes->cols[i].value, "%s", "NULL"); - taos_free_result(res); - res = NULL; - continue; - } + if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) { + errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n", + taos_stmt_errstr(stmt)); - int32_t* length = taos_fetch_lengths(res); + free(stmtBuffer); + free(tableDes); + freeRecordSchema(recordSchema); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; + } - //int32_t* length = taos_fetch_lengths(tmpResult); - switch (fields[0].type) { - case TSDB_DATA_TYPE_BOOL: - sprintf(tableDes->cols[i].value, "%d", - ((((int32_t)(*((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]))) == 1) ? 1 : 0)); - break; - case TSDB_DATA_TYPE_TINYINT: - sprintf(tableDes->cols[i].value, "%d", - *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_SMALLINT: - sprintf(tableDes->cols[i].value, "%d", - *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_INT: - sprintf(tableDes->cols[i].value, "%d", - *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_BIGINT: - sprintf(tableDes->cols[i].value, "%" PRId64 "", - *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_FLOAT: - sprintf(tableDes->cols[i].value, "%f", - GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_DOUBLE: - sprintf(tableDes->cols[i].value, "%f", - GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX])); - break; - case TSDB_DATA_TYPE_BINARY: - memset(tableDes->cols[i].value, 0, - sizeof(tableDes->cols[i].value)); - int len = strlen((char *)row[0]); - // FIXME for long value - if (len < (COL_VALUEBUF_LEN - 2)) { - converStringToReadable( - (char *)row[0], - length[0], - tableDes->cols[i].value, - len); - } else { - tableDes->cols[i].var_value = calloc(1, len * 2); - if (tableDes->cols[i].var_value == NULL) { - errorPrint("%s() LN%d, memory alalocation failed!\n", - __func__, __LINE__); - taos_free_result(res); - return -1; + if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) { + errorPrint("Failed to execute taos_stmt_set_tbname(%s). 
reason: %s\n", + recordSchema->name, taos_stmt_errstr(stmt)); + + free(stmtBuffer); + free(tableDes); + avro_schema_decref(schema); + avro_file_reader_close(reader); + avro_writer_free(jsonwriter); + return -1; + } + + avro_value_iface_t *value_class = avro_generic_class_from_schema(schema); + avro_value_t value; + avro_generic_value_new(value_class, &value); + + char *bindArray = + malloc(sizeof(TAOS_BIND) * onlyCol); + assert(bindArray); + + int success = 0; + int failed = 0; + while(!avro_file_reader_read_value(reader, &value)) { + memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol); + TAOS_BIND *bind; + + for (int i = 0; i < recordSchema->num_fields; i++) { + bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i)); + + avro_value_t field_value; + + FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i); + + bind->is_null = NULL; + int is_null = 1; + if (0 == i) { + int64_t *ts = malloc(sizeof(int64_t)); + assert(ts); + + avro_value_get_by_name(&value, field->name, &field_value, NULL); + avro_value_get_long(&field_value, ts); + + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = ts; + bind->length = &bind->buffer_length; + } else if (0 == avro_value_get_by_name( + &value, field->name, &field_value, NULL)) { + + if (0 == strcasecmp(tableDes->cols[i].type, "int")) { + int32_t *n32 = malloc(sizeof(int32_t)); + assert(n32); + + avro_value_get_int(&field_value, n32); + debugPrint("%d | ", *n32); + bind->buffer_type = TSDB_DATA_TYPE_INT; + bind->buffer_length = sizeof(int32_t); + bind->buffer = n32; + } else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) { + int32_t *n8 = malloc(sizeof(int32_t)); + assert(n8); + + avro_value_get_int(&field_value, n8); + debugPrint("%d | ", *n8); + bind->buffer_type = TSDB_DATA_TYPE_TINYINT; + bind->buffer_length = sizeof(int8_t); + bind->buffer = (int8_t *)n8; + } else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) { + int32_t *n16 = malloc(sizeof(int32_t)); + assert(n16); + + avro_value_get_int(&field_value, n16); + debugPrint("%d | ", *n16); + bind->buffer_type = TSDB_DATA_TYPE_SMALLINT; + bind->buffer_length = sizeof(int16_t); + bind->buffer = (int32_t*)n16; + } else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) { + int64_t *n64 = malloc(sizeof(int64_t)); + assert(n64); + + avro_value_get_long(&field_value, n64); + debugPrint("%"PRId64" | ", *n64); + bind->buffer_type = TSDB_DATA_TYPE_BIGINT; + bind->buffer_length = sizeof(int64_t); + bind->buffer = n64; + } else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) { + int64_t *n64 = malloc(sizeof(int64_t)); + assert(n64); + + avro_value_get_long(&field_value, n64); + debugPrint("%"PRId64" | ", *n64); + bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP; + bind->buffer_length = sizeof(int64_t); + bind->buffer = n64; + } else if (0 == strcasecmp(tableDes->cols[i].type, "float")) { + float *f = malloc(sizeof(float)); + assert(f); + + avro_value_get_float(&field_value, f); + if (TSDB_DATA_FLOAT_NULL == *f) { + debugPrint("%s | ", "NULL"); + bind->is_null = &is_null; + } else { + debugPrint("%f | ", *f); + bind->buffer = f; } - converStringToReadable((char *)row[0], - length[0], - (char *)(tableDes->cols[i].var_value), len); + bind->buffer_type = TSDB_DATA_TYPE_FLOAT; + bind->buffer_length = sizeof(float); + } else if (0 == strcasecmp(tableDes->cols[i].type, "double")) { + double *dbl = malloc(sizeof(double)); + assert(dbl); + + avro_value_get_double(&field_value, dbl); + if 
(TSDB_DATA_DOUBLE_NULL == *dbl) { + debugPrint("%s | ", "NULL"); + bind->is_null = &is_null; + } else { + debugPrint("%f | ", *dbl); + bind->buffer = dbl; + } + bind->buffer = dbl; + bind->buffer_type = TSDB_DATA_TYPE_DOUBLE; + bind->buffer_length = sizeof(double); + } else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) { + size_t size; + + char *buf = NULL; + avro_value_get_string(&field_value, (const char **)&buf, &size); + debugPrint("%s | ", (char *)buf); + bind->buffer_type = TSDB_DATA_TYPE_BINARY; + bind->buffer_length = tableDes->cols[i].length; + bind->buffer = buf; + } else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) { + size_t bytessize; + void *bytesbuf = NULL; + + avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize); + debugPrint("%s | ", (char*)bytesbuf); + bind->buffer_type = TSDB_DATA_TYPE_NCHAR; + bind->buffer_length = tableDes->cols[i].length; + bind->buffer = bytesbuf; + } else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) { + int32_t *bl = malloc(sizeof(int32_t)); + assert(bl); + + avro_value_get_boolean(&field_value, bl); + debugPrint("%s | ", (*bl)?"true":"false"); + bind->buffer_type = TSDB_DATA_TYPE_BOOL; + bind->buffer_length = sizeof(int8_t); + bind->buffer = (int8_t*)bl; } - break; - case TSDB_DATA_TYPE_NCHAR: - { - memset(tableDes->cols[i].value, 0, sizeof(tableDes->cols[i].note)); - char tbuf[COL_NOTE_LEN-2]; // need reserve 2 bytes for ' ' - convertNCharToReadable((char *)row[TSDB_SHOW_TABLES_NAME_INDEX], length[0], tbuf, COL_NOTE_LEN); - sprintf(tableDes->cols[i].value, "%s", tbuf); - break; - } - case TSDB_DATA_TYPE_TIMESTAMP: - sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); -#if 0 - if (!g_args.mysqlFlag) { - sprintf(tableDes->cols[i].value, "%" PRId64 "", *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - } else { - char buf[64] = "\0"; - int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]); - time_t tt = (time_t)(ts / 1000); - struct tm *ptm = localtime(&tt); - strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); - sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf, (int)(ts % 1000)); - } -#endif - break; - default: - break; + bind->length = &bind->buffer_length; + } + } + debugPrint("%s", "\n"); - taos_free_result(res); + if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) { + errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + freeBindArray(bindArray, onlyCol); + failed --; + continue; + } + if (0 != taos_stmt_add_batch(stmt)) { + errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n", + __func__, __LINE__, taos_stmt_errstr(stmt)); + freeBindArray(bindArray, onlyCol); + failed --; + continue; + } + + freeBindArray(bindArray, onlyCol); + + success ++; + continue; } + if (0 != taos_stmt_execute(stmt)) { + errorPrint("%s() LN%d stmt_bind_param() failed! 
reason: %s\n",
+                __func__, __LINE__, taos_stmt_errstr(stmt));
+        failed = success;
+    }
+
+    avro_value_decref(&value);
+    avro_value_iface_decref(value_class);
+
+    tfree(bindArray);
+
+    tfree(stmtBuffer);
+    tfree(tableDes);
+
+    freeRecordSchema(recordSchema);
+    avro_schema_decref(schema);
+    avro_file_reader_close(reader);
+    avro_writer_free(jsonwriter);
+
+    tfree(jsonbuf);
+
+    taos_stmt_close(stmt);
     taos_close(taos);
-    return colCount;
+
+    if (failed < 0)
+        return failed;
+    return success;
 }
-static void dumpCreateDbClause(
-        SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
-    char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+static void* dumpInAvroWorkThreadFp(void *arg)
+{
+    threadInfo *pThread = (threadInfo*)arg;
+    setThreadName("dumpInAvroWorkThrd");
+    verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
+            pThread->threadIndex, pThread->count, pThread->from);
+
+    for (int64_t i = 0; i < pThread->count; i++) {
+        char avroFile[MAX_PATH_LEN];
+        sprintf(avroFile, "%s/%s", g_args.inpath,
+                g_tsDumpInAvroFiles[pThread->from + i]);
+
+        if (0 == dumpInOneAvroFile(g_tsCharset,
+                    g_args.encode,
+                    avroFile)) {
+            okPrint("[%d] Success dump in file: %s\n",
+                    pThread->threadIndex, avroFile);
+        }
+    }
-    char *pstr = sqlstr;
-    pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
-    if (isDumpProperty) {
-        pstr += sprintf(pstr,
-                "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
-                dbInfo->replica, dbInfo->quorum, dbInfo->days,
-                dbInfo->keeplist,
-                dbInfo->cache,
-                dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
-                dbInfo->fsync,
-                dbInfo->cachelast,
-                dbInfo->comp, dbInfo->precision, dbInfo->update);
+    return NULL;
+}
+
+static int64_t dumpInAvroWorkThreads()
+{
+    int64_t ret = 0;
+
+    int32_t threads = g_args.thread_num;
+
+    uint64_t avroFileCount = getFilesNum("avro");
+    if (0 == avroFileCount) {
+        debugPrint("No .avro file found in %s\n", g_args.inpath);
+        return 0;
     }
-    pstr += sprintf(pstr, ";");
-    fprintf(fp, "%s\n\n", sqlstr);
+    createDumpinList("avro", avroFileCount);
+
+    threadInfo *pThread;
+
+    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+    threadInfo *infos = (threadInfo *)calloc(
+            threads, sizeof(threadInfo));
+    assert(pids);
+    assert(infos);
+
+    int64_t a = avroFileCount / threads;
+    if (a < 1) {
+        threads = avroFileCount;
+        a = 1;
+    }
+
+    int64_t b = 0;
+    if (threads != 0) {
+        b = avroFileCount % threads;
+    }
+
+    int64_t from = 0;
+
+    for (int32_t t = 0; t < threads; ++t) {
+        pThread = infos + t;
+        pThread->threadIndex = t;
+
+        pThread->from = from;
+        pThread->count = (t < b)?(a+1):a;
+        from += pThread->count;
+        verbosePrint(
+                "Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n",
+                t, pThread->count, pThread->from);
+
+        if (pthread_create(pids + t, NULL,
+                    dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
+            errorPrint("%s() LN%d, thread[%d] failed to start\n",
+                    __func__, __LINE__, pThread->threadIndex);
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    for (int t = 0; t < threads; ++t) {
+        pthread_join(pids[t], NULL);
+    }
+
+    free(infos);
+    free(pids);
+
+    freeFileList(g_tsDumpInAvroFiles, avroFileCount);
+
+    return ret;
 }
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
-        FILE *fp, char* dbName) {
-    int counter = 0;
-    int count_temp = 0;
-    char sqlstr[COMMAND_SIZE];
+#endif /* AVRO_SUPPORT */
-    char* pstr = sqlstr;
+static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
+{
+    int64_t totalRows = 0;
-    pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS 
%s.%s", - dbName, tableDes->name); + int32_t sql_buf_len = g_args.max_sql_len; + char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); + assert(tmpBuffer); - for (; counter < numOfCols; counter++) { - if (tableDes->cols[counter].note[0] != '\0') break; + char *pstr = tmpBuffer; - if (counter == 0) { - pstr += sprintf(pstr, " (%s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); + TAOS_ROW row = NULL; + int rowFlag = 0; + int64_t lastRowsPrint = 5000000; + int count = 0; + + int numFields = taos_field_count(res); + assert(numFields > 0); + TAOS_FIELD *fields = taos_fetch_fields(res); + + int32_t curr_sqlstr_len = 0; + int32_t total_sqlstr_len = 0; + + while ((row = taos_fetch_row(res)) != NULL) { + curr_sqlstr_len = 0; + + int32_t* length = taos_fetch_lengths(res); // act len + + if (count == 0) { + total_sqlstr_len = 0; + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "INSERT INTO %s.%s VALUES (", dbName, tbName); } else { - pstr += sprintf(pstr, ", %s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); + if (g_args.mysqlFlag) { + if (0 == rowFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + rowFlag++; + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ("); + } + } else { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "("); + } } - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { - pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); - } + for (int col = 0; col < numFields; col++) { + if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", "); + + if (row[col] == NULL) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL"); + continue; + } + + switch (fields[col].type) { + case TSDB_DATA_TYPE_BOOL: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + ((((int32_t)(*((char *)row[col])))==1)?1:0)); + break; + + case TSDB_DATA_TYPE_TINYINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + *((int8_t *)row[col])); + break; + + case TSDB_DATA_TYPE_SMALLINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + *((int16_t *)row[col])); + break; + + case TSDB_DATA_TYPE_INT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", + *((int32_t *)row[col])); + break; + + case TSDB_DATA_TYPE_BIGINT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "%" PRId64 "", + *((int64_t *)row[col])); + break; + + case TSDB_DATA_TYPE_FLOAT: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", + GET_FLOAT_VAL(row[col])); + break; + + case TSDB_DATA_TYPE_DOUBLE: + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", + GET_DOUBLE_VAL(row[col])); + break; + + case TSDB_DATA_TYPE_BINARY: + { + char tbuf[COMMAND_SIZE] = {0}; + converStringToReadable((char *)row[col], length[col], + tbuf, COMMAND_SIZE); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_NCHAR: + { + char tbuf[COMMAND_SIZE] = {0}; + convertNCharToReadable((char *)row[col], length[col], + tbuf, COMMAND_SIZE); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "\'%s\'", tbuf); + break; + } + case TSDB_DATA_TYPE_TIMESTAMP: + if (!g_args.mysqlFlag) { + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, + "%" PRId64 "", + *(int64_t *)row[col]); + } else { + char buf[64] = "\0"; + int64_t ts = *((int64_t *)row[col]); + time_t tt = (time_t)(ts / 1000); + struct tm *ptm = localtime(&tt); + strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm); + curr_sqlstr_len += sprintf(pstr + 
curr_sqlstr_len, + "\'%s.%03d\'", + buf, (int)(ts % 1000)); + } + break; + default: + break; + } + } + + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")"); + + totalRows++; + count++; + fprintf(fp, "%s", tmpBuffer); + + if (totalRows >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from %s.%s\n", + totalRows, dbName, tbName); + lastRowsPrint += 5000000; + } + + total_sqlstr_len += curr_sqlstr_len; + + if ((count >= g_args.data_batch) + || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { + fprintf(fp, ";\n"); + count = 0; + } + } + + debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); + + fprintf(fp, "\n"); + free(tmpBuffer); + + return totalRows; +} + +static int64_t dumpTableData(FILE *fp, char *tbName, + char* dbName, int precision, + char *jsonSchema) { + int64_t totalRows = 0; + + char sqlstr[1024] = {0}; + + int64_t start_time, end_time; + if (strlen(g_args.humanStartTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanStartTime, &start_time, + strlen(g_args.humanStartTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", + g_args.humanStartTime); + return -1; + } + } else { + start_time = g_args.start_time; + } + + if (strlen(g_args.humanEndTime)) { + if (TSDB_CODE_SUCCESS != taosParseTime( + g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), + precision, 0)) { + errorPrint("Input %s, time format error!\n", g_args.humanEndTime); + return -1; + } + } else { + end_time = g_args.end_time; + } + + sprintf(sqlstr, + "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", + dbName, tbName, start_time, end_time); + + TAOS *taos = taos_connect(g_args.host, + g_args.user, g_args.password, dbName, g_args.port); + if (NULL == taos) { + errorPrint( + "Failed to connect to TDengine server %s by specified database %s\n", + g_args.host, dbName); + return -1; + } + + TAOS_RES* res = taos_query(taos, sqlstr); + int32_t code = taos_errno(res); + if (code != 0) { + errorPrint("failed to run command %s, reason: %s\n", + sqlstr, taos_errstr(res)); + taos_free_result(res); + taos_close(taos); + return -1; + } + +#ifdef AVRO_SUPPORT + if (g_args.avro) { + char avroFilename[MAX_PATH_LEN] = {0}; + + if (g_args.outpath[0] != 0) { + sprintf(avroFilename, "%s/%s.%s.avro", + g_args.outpath, dbName, tbName); + } else { + sprintf(avroFilename, "%s.%s.avro", + dbName, tbName); + } + + totalRows = writeResultToAvro(avroFilename, jsonSchema, res); + } else +#endif + totalRows = writeResultToSql(res, fp, dbName, tbName); + + taos_free_result(res); + taos_close(taos); + return totalRows; +} + +static int64_t dumpNormalTable( + TAOS *taos, + char *dbName, + char *stable, + char *tbName, + int precision, + FILE *fp + ) { + int colCount = 0; + + TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef) + + sizeof(ColDes) * TSDB_MAX_COLUMNS); + + if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table + colCount = getTableDes(taos, dbName, tbName, tableDes, false); + + if (colCount < 0) { + errorPrint("%s() LN%d, failed to get table[%s] schema\n", + __func__, + __LINE__, + tbName); + free(tableDes); + return -1; + } + + // create child-table using super-table + dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp); + } else { // dump table definition + colCount = getTableDes(taos, dbName, tbName, tableDes, false); + + if (colCount < 0) { + errorPrint("%s() LN%d, failed to get table[%s] schema\n", + __func__, + __LINE__, + tbName); + free(tableDes); + return 
-1; + } + + // create normal-table or super-table + dumpCreateTableClause(tableDes, colCount, fp, dbName); + } + + char *jsonSchema = NULL; +#ifdef AVRO_SUPPORT + if (g_args.avro) { + if (0 != convertTbDesToJson( + dbName, tbName, tableDes, colCount, &jsonSchema)) { + errorPrint("%s() LN%d, convertTbDesToJson failed\n", + __func__, + __LINE__); + freeTbDes(tableDes); + return -1; + } + } +#endif + + int64_t totalRows = 0; + if (!g_args.schemaonly) { + totalRows = dumpTableData(fp, tbName, dbName, precision, + jsonSchema); + } + + tfree(jsonSchema); + freeTbDes(tableDes); + return totalRows; +} + +static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName) +{ + int64_t count = 0; + + char tmpBuf[MAX_PATH_LEN] = {0}; + FILE *fp = NULL; + + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%s.sql", + g_args.outpath, dbInfo->name, ntbName); + } else { + sprintf(tmpBuf, "%s.%s.sql", + dbInfo->name, ntbName); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return -1; + } + + count = dumpNormalTable( + taos, + dbInfo->name, + NULL, + ntbName, + getPrecisionByString(dbInfo->precision), + fp); + if (count > 0) { + atomic_add_fetch_64(&g_totalDumpOutRows, count); + } + fclose(fp); + return count; +} + +static int64_t dumpNormalTableBelongStb( + TAOS *taos, + SDbInfo *dbInfo, char *stbName, char *ntbName) +{ + int64_t count = 0; + + char tmpBuf[MAX_PATH_LEN] = {0}; + FILE *fp = NULL; + + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%s.sql", + g_args.outpath, dbInfo->name, ntbName); + } else { + sprintf(tmpBuf, "%s.%s.sql", + dbInfo->name, ntbName); + } + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return -1; + } + + count = dumpNormalTable( + taos, + dbInfo->name, + stbName, + ntbName, + getPrecisionByString(dbInfo->precision), + fp); + if (count > 0) { + atomic_add_fetch_64(&g_totalDumpOutRows, count); + } + + fclose(fp); + return count; +} + +static void *dumpNtbOfDb(void *arg) { + threadInfo *pThreadInfo = (threadInfo *)arg; + + debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from); + debugPrint("dump table count = \t%"PRId64"\n", + pThreadInfo->count); + + FILE *fp = NULL; + char tmpBuf[MAX_PATH_LEN] = {0}; + + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%d.sql", + g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex); + } else { + sprintf(tmpBuf, "%s.%d.sql", + pThreadInfo->dbName, pThreadInfo->threadIndex); + } + + fp = fopen(tmpBuf, "w"); + + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return NULL; + } + + int64_t count; + for (int64_t i = 0; i < pThreadInfo->count; i++) { + debugPrint("[%d] No.\t%"PRId64" table name: %s\n", + pThreadInfo->threadIndex, i, + ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name); + count = dumpNormalTable( + pThreadInfo->taos, + pThreadInfo->dbName, + ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable, + ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name, + pThreadInfo->precision, + fp); + if (count < 0) { + break; + } else { + atomic_add_fetch_64(&g_totalDumpOutRows, count); + } + } + + fclose(fp); + return NULL; +} + +static int checkParam() { + if (g_args.all_databases && g_args.databases) { + errorPrint("%s", "conflict option --all-databases and --databases\n"); + return -1; + } + + if (g_args.start_time > g_args.end_time) { + 
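dumpNormalTableWithoutStb() and dumpNormalTableBelongStb() above compose their output paths as "&lt;outpath&gt;/&lt;db&gt;.&lt;table&gt;.sql" with sprintf into a MAX_PATH_LEN buffer. A bounds-checked variant using snprintf, where the length constant is a stand-in rather than taosdump's definition:

```c
// Per-table output naming, with snprintf guarding against overflow.
// MAX_PATH_LEN here is an assumed placeholder.
#include <stdio.h>

#define MAX_PATH_LEN 4096

static int buildDumpPath(char *buf, const char *outpath,
                         const char *db, const char *tb) {
    int n = (outpath && outpath[0])
        ? snprintf(buf, MAX_PATH_LEN, "%s/%s.%s.sql", outpath, db, tb)
        : snprintf(buf, MAX_PATH_LEN, "%s.%s.sql", db, tb);
    return (n < 0 || n >= MAX_PATH_LEN) ? -1 : 0;   // -1 means truncated
}

int main(void) {
    char path[MAX_PATH_LEN];
    if (buildDumpPath(path, "/tmp/dump", "power", "meters") == 0)
        printf("%s\n", path);
    return 0;
}
```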
errorPrint("%s", "start time is larger than end time\n"); + return -1; + } + + if (g_args.arg_list_len == 0) { + if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) { + errorPrint("%s", "taosdump requires parameters\n"); + return -1; + } + } + /* + if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) { + fprintf(stderr, "duplicate parameter input and output file path\n"); + return -1; + } + */ + if (!g_args.isDumpIn && g_args.encode != NULL) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + if (g_args.table_batch <= 0) { + fprintf(stderr, "invalid option in dump out\n"); + return -1; + } + + return 0; +} + +/* +static bool isEmptyCommand(char *cmd) { + char *pchar = cmd; + + while (*pchar != '\0') { + if (*pchar != ' ') return false; + pchar++; + } + + return true; +} + +static void taosReplaceCtrlChar(char *str) { + bool ctrlOn = false; + char *pstr = NULL; + + for (pstr = str; *str != '\0'; ++str) { + if (ctrlOn) { + switch (*str) { + case 'n': + *pstr = '\n'; + pstr++; + break; + case 'r': + *pstr = '\r'; + pstr++; + break; + case 't': + *pstr = '\t'; + pstr++; + break; + case '\\': + *pstr = '\\'; + pstr++; + break; + case '\'': + *pstr = '\''; + pstr++; + break; + default: + break; + } + ctrlOn = false; + } else { + if (*str == '\\') { + ctrlOn = true; + } else { + *pstr = *str; + pstr++; + } + } + } + + *pstr = '\0'; +} +*/ + +char *ascii_literal_list[] = { + "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", + "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", + "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", + "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", + "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", + "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", + "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", + "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", + "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", + "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", + "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", + "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", + "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf", + "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", + "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", + "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", + "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; + +static int converStringToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + while 
(size > 0) { + if (*pstr == '\0') break; + pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); + pstr++; + size--; } + *pbuf = '\0'; + return 0; +} - count_temp = counter; - - for (; counter < numOfCols; counter++) { - if (counter == count_temp) { - pstr += sprintf(pstr, ") TAGS (%s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); - } else { - pstr += sprintf(pstr, ", %s %s", - tableDes->cols[counter].field, tableDes->cols[counter].type); +static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) { + char *pstr = str; + char *pbuf = buf; + wchar_t wc; + while (size > 0) { + if (*pstr == '\0') break; + int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX); + if (byte_width < 0) { + errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__); + exit(-1); } - if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 || - strcasecmp(tableDes->cols[counter].type, "nchar") == 0) { - pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length); + if ((int)wc < 256) { + pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]); + } else { + memcpy(pbuf, pstr, byte_width); + pbuf += byte_width; } + pstr += byte_width; } - pstr += sprintf(pstr, ");"); - - debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr); - return fprintf(fp, "%s\n\n", sqlstr); -} + *pbuf = '\0'; -static int writeSchemaToAvro(char *jsonAvroSchema) -{ - errorPrint("%s() LN%d, TODO: implement write schema to avro", - __func__, __LINE__); return 0; } -static int64_t writeResultToAvro(TAOS_RES *res) -{ - errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__); - return 0; +static void dumpCharset(FILE *fp) { + char charsetline[256]; + + (void)fseek(fp, 0, SEEK_SET); + sprintf(charsetline, "#!%s\n", tsCharset); + (void)fwrite(charsetline, strlen(charsetline), 1, fp); } -static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName) -{ - int64_t totalRows = 0; +static void loadFileCharset(FILE *fp, char *fcharset) { + char * line = NULL; + size_t line_size = 0; - int32_t sql_buf_len = g_args.max_sql_len; - char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); - if (tmpBuffer == NULL) { - errorPrint("failed to allocate %d memory\n", sql_buf_len + 128); - return -1; + (void)fseek(fp, 0, SEEK_SET); + ssize_t size = getline(&line, &line_size, fp); + if (size <= 2) { + goto _exit_no_charset; } - char *pstr = tmpBuffer; - - TAOS_ROW row = NULL; - int numFields = 0; - int rowFlag = 0; - int64_t lastRowsPrint = 5000000; - int count = 0; + if (strncmp(line, "#!", 2) != 0) { + goto _exit_no_charset; + } + if (line[size - 1] == '\n') { + line[size - 1] = '\0'; + size--; + } + strcpy(fcharset, line + 2); - numFields = taos_field_count(res); - assert(numFields > 0); - TAOS_FIELD *fields = taos_fetch_fields(res); + tfree(line); + return; - int32_t curr_sqlstr_len = 0; - int32_t total_sqlstr_len = 0; +_exit_no_charset: + (void)fseek(fp, 0, SEEK_SET); + *fcharset = '\0'; + tfree(line); + return; +} - while ((row = taos_fetch_row(res)) != NULL) { - curr_sqlstr_len = 0; +// ======== dumpIn support multi threads functions ================================// - int32_t* length = taos_fetch_lengths(res); // act len +static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset, + char* encode, char* fileName) { + int read_len = 0; + char * cmd = NULL; + size_t cmd_len = 0; + char * line = NULL; + size_t line_len = 0; - if (count == 0) { - total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, - "INSERT INTO %s.%s VALUES (", dbName, 
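dumpCharset() and loadFileCharset() above share a small convention: the first line of a dump file is "#!&lt;charset&gt;", written at offset zero and recognized on import by its two-byte magic. A round-trip sketch of the same convention on a temporary file:

```c
// Round-trip of the "#!<charset>" header written by dumpCharset()
// and parsed back by loadFileCharset(). "UTF-8" is an example value.
#include <stdio.h>
#include <string.h>

int main(void) {
    FILE *fp = tmpfile();
    if (!fp) return 1;

    fprintf(fp, "#!UTF-8\n");                 // what dumpCharset() writes
    fseek(fp, 0, SEEK_SET);

    char line[256] = {0};
    char charset[256] = {0};
    if (fgets(line, sizeof(line), fp)
            && strncmp(line, "#!", 2) == 0) { // two-byte magic check
        line[strcspn(line, "\n")] = '\0';
        strcpy(charset, line + 2);            // what loadFileCharset() keeps
    }
    printf("charset: %s\n", charset[0] ? charset : "(none)");
    fclose(fp);
    return 0;
}
```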
tbName);
-        } else {
-            if (g_args.mysqlFlag) {
-                if (0 == rowFlag) {
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
-                    rowFlag++;
-                } else {
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
-                }
-            } else {
-                curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
-            }
-        }
+    cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
+    if (cmd == NULL) {
+        errorPrint("%s() LN%d, failed to allocate memory\n",
+                __func__, __LINE__);
+        return -1;
+    }
-        for (int col = 0; col < numFields; col++) {
-            if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+    int lastRowsPrint = 5000000;
+    int lineNo = 0;
+    while ((read_len = getline(&line, &line_len, fp)) != -1) {
+        ++lineNo;
+        if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
+        line[--read_len] = '\0';
-            if (row[col] == NULL) {
-                curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
-                continue;
-            }
+        //if (read_len == 0 || isCommentLine(line)) {  // line starts with #
+        if (read_len == 0) {
+            continue;
+        }
-            switch (fields[col].type) {
-                case TSDB_DATA_TYPE_BOOL:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
-                            ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
-                    break;
-                case TSDB_DATA_TYPE_TINYINT:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
-                    break;
-                case TSDB_DATA_TYPE_SMALLINT:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
-                    break;
-                case TSDB_DATA_TYPE_INT:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
-                    break;
-                case TSDB_DATA_TYPE_BIGINT:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
-                            *((int64_t *)row[col]));
-                    break;
-                case TSDB_DATA_TYPE_FLOAT:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
-                    break;
-                case TSDB_DATA_TYPE_DOUBLE:
-                    curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
-                    break;
-                case TSDB_DATA_TYPE_BINARY:
-                    {
-                        char tbuf[COMMAND_SIZE] = {0};
-                        converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
-                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
-                        break;
-                    }
-                case TSDB_DATA_TYPE_NCHAR:
-                    {
-                        char tbuf[COMMAND_SIZE] = {0};
-                        convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
-                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
-                        break;
-                    }
-                case TSDB_DATA_TYPE_TIMESTAMP:
-                    if (!g_args.mysqlFlag) {
-                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
-                                *(int64_t *)row[col]);
-                    } else {
-                        char buf[64] = "\0";
-                        int64_t ts = *((int64_t *)row[col]);
-                        time_t tt = (time_t)(ts / 1000);
-                        struct tm *ptm = localtime(&tt);
-                        strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
-                        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'",
-                                buf, (int)(ts % 1000));
-                    }
-                    break;
-                default:
-                    break;
-            }
+        if (line[read_len - 1] == '\\') {
+            line[read_len - 1] = ' ';
+            memcpy(cmd + cmd_len, line, read_len);
+            cmd_len += read_len;
+            continue;
         }
-        curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+        memcpy(cmd + cmd_len, line, read_len);
+        cmd[read_len + cmd_len] = '\0';
+        if (queryDbImpl(taos, cmd)) {
+            errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
+                    __func__, __LINE__, lineNo, fileName);
+            fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
+        }
-        totalRows++;
-        count++;
-        fprintf(fp, "%s", tmpBuffer);
+        memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
+        cmd_len = 0;
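The loop above replays a dump file line by line: a trailing backslash marks a continued statement, which is folded into `cmd` with the backslash replaced by a space, and only complete statements are handed to the server. A standalone sketch of that accumulation rule (input strings and sizes are illustrative only, not taosdump code):

#include <stdio.h>
#include <string.h>

int main(void) {
    /* two "file lines": the first is continued with a trailing '\' */
    const char *lines[] = { "INSERT INTO d.t\\", "VALUES (1, 2)" };
    char cmd[256] = {0};
    size_t cmd_len = 0;

    for (int i = 0; i < 2; i++) {
        char line[128];
        strcpy(line, lines[i]);
        size_t len = strlen(line);
        if (len > 0 && line[len - 1] == '\\') {  /* continued statement */
            line[len - 1] = ' ';                 /* '\' becomes a space */
            memcpy(cmd + cmd_len, line, len);
            cmd_len += len;
            continue;                            /* wait for the rest */
        }
        memcpy(cmd + cmd_len, line, len);
        cmd[cmd_len + len] = '\0';
        printf("execute: %s\n", cmd);  /* -> INSERT INTO d.t VALUES (1, 2) */
        memset(cmd, 0, sizeof(cmd));   /* reset for the next statement */
        cmd_len = 0;
    }
    return 0;
}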
%s.%s\n", - totalRows, dbName, tbName); + if (lineNo >= lastRowsPrint) { + printf(" %d lines already be executed from file %s\n", lineNo, fileName); lastRowsPrint += 5000000; } + } - total_sqlstr_len += curr_sqlstr_len; + tfree(cmd); + tfree(line); + return 0; +} - if ((count >= g_args.data_batch) - || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) { - fprintf(fp, ";\n"); - count = 0; +static void* dumpInSqlWorkThreadFp(void *arg) +{ + threadInfo *pThread = (threadInfo*)arg; + setThreadName("dumpInSqlWorkThrd"); + fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n", + pThread->threadIndex, pThread->count, pThread->from); + + for (int64_t i = 0; i < pThread->count; i++) { + char sqlFile[MAX_PATH_LEN]; + sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]); + + FILE* fp = openDumpInFile(sqlFile); + if (NULL == fp) { + errorPrint("[%d] Failed to open input file: %s\n", + pThread->threadIndex, sqlFile); + continue; } + + if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode, + sqlFile)) { + okPrint("[%d] Success dump in file: %s\n", + pThread->threadIndex, sqlFile); + } + fclose(fp); } - debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len); + return NULL; +} - fprintf(fp, "\n"); - atomic_add_fetch_64(&g_totalDumpOutRows, totalRows); - free(tmpBuffer); +static int dumpInSqlWorkThreads() +{ + int32_t threads = g_args.thread_num; - return 0; -} + uint64_t sqlFileCount = getFilesNum("sql"); + if (0 == sqlFileCount) { + debugPrint("No .sql file found in %s\n", g_args.inpath); + return 0; + } -static int64_t dumpTableData(FILE *fp, char *tbName, - char* dbName, int precision, - char *jsonAvroSchema) { - int64_t totalRows = 0; + createDumpinList("sql", sqlFileCount); + + threadInfo *pThread; + + pthread_t *pids = calloc(1, threads * sizeof(pthread_t)); + threadInfo *infos = (threadInfo *)calloc( + threads, sizeof(threadInfo)); + assert(pids); + assert(infos); + + int64_t a = sqlFileCount / threads; + if (a < 1) { + threads = sqlFileCount; + a = 1; + } + + int64_t b = 0; + if (threads != 0) { + b = sqlFileCount % threads; + } - char sqlstr[1024] = {0}; + int64_t from = 0; - int64_t start_time, end_time; - if (strlen(g_args.humanStartTime)) { - if (TSDB_CODE_SUCCESS != taosParseTime( - g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime), - precision, 0)) { - errorPrint("Input %s, time format error!\n", g_args.humanStartTime); + for (int32_t t = 0; t < threads; ++t) { + pThread = infos + t; + pThread->threadIndex = t; + + pThread->from = from; + pThread->count = tcount; + verbosePrint( + "Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n", + t, pThread->count, pThread->from); + + pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (pThread->taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + free(infos); + free(pids); return -1; } - } else { - start_time = g_args.start_time; - } - if (strlen(g_args.humanEndTime)) { - if (TSDB_CODE_SUCCESS != taosParseTime( - g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime), - precision, 0)) { - errorPrint("Input %s, time format error!\n", g_args.humanEndTime); - return -1; + if (pthread_create(pids + t, NULL, + dumpInSqlWorkThreadFp, (void*)pThread) != 0) { + errorPrint("%s() LN%d, thread[%d] failed to start\n", + __func__, __LINE__, pThread->threadIndex); + exit(EXIT_FAILURE); } - } else { - end_time = g_args.end_time; } - sprintf(sqlstr, - "select * from %s.%s 
-    sprintf(sqlstr,
-            "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
-            dbName, tbName, start_time, end_time);
-
-    TAOS *taos = taos_connect(g_args.host,
-            g_args.user, g_args.password, dbName, g_args.port);
-    if (NULL == taos) {
-        errorPrint(
-                "Failed to connect to TDengine server %s by specified database %s\n",
-                g_args.host, dbName);
-        return -1;
+    for (int t = 0; t < threads; ++t) {
+        pthread_join(pids[t], NULL);
     }
-    TAOS_RES* res = taos_query(taos, sqlstr);
-    int32_t code = taos_errno(res);
-    if (code != 0) {
-        errorPrint("failed to run command %s, reason: %s\n",
-                sqlstr, taos_errstr(res));
-        taos_free_result(res);
-        taos_close(taos);
-        return -1;
+    for (int t = 0; t < threads; ++t) {
+        taos_close(infos[t].taos);
     }
+    free(infos);
+    free(pids);
-    if (g_args.avro) {
-        writeSchemaToAvro(jsonAvroSchema);
-        totalRows = writeResultToAvro(res);
-    } else {
-        totalRows = writeResultToSql(res, fp, dbName, tbName);
-    }
+    freeFileList(g_tsDumpInSqlFiles, sqlFileCount);
-    taos_free_result(res);
-    taos_close(taos);
-    return totalRows;
+    return 0;
 }
-static int checkParam() {
-    if (g_args.all_databases && g_args.databases) {
-        errorPrint("%s", "conflict option --all-databases and --databases\n");
-        return -1;
-    }
+static int dumpInDbs()
+{
+    TAOS *taos = taos_connect(
+            g_args.host, g_args.user, g_args.password,
+            NULL, g_args.port);
-    if (g_args.start_time > g_args.end_time) {
-        errorPrint("%s", "start time is larger than end time\n");
+    if (taos == NULL) {
+        errorPrint("%s() LN%d, failed to connect to TDengine server\n",
+                __func__, __LINE__);
         return -1;
     }
-    if (g_args.arg_list_len == 0) {
-        if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
-            errorPrint("%s", "taosdump requires parameters\n");
-            return -1;
-        }
-    }
-    /*
-    if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
-        fprintf(stderr, "duplicate parameter input and output file path\n");
-        return -1;
-    }
-    */
-    if (!g_args.isDumpIn && g_args.encode != NULL) {
-        fprintf(stderr, "invalid option in dump out\n");
+    char dbsSql[MAX_PATH_LEN];
+    sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql");
+
+    FILE *fp = openDumpInFile(dbsSql);
+    if (NULL == fp) {
+        errorPrint("%s() LN%d, failed to open input file %s\n",
+                __func__, __LINE__, dbsSql);
         return -1;
     }
+    debugPrint("Successfully opened input file: %s\n", dbsSql);
+    loadFileCharset(fp, g_tsCharset);
-    if (g_args.table_batch <= 0) {
-        fprintf(stderr, "invalid option in dump out\n");
-        return -1;
+    if (0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) {
+        okPrint("Successfully dumped in file: %s!\n", dbsSql);
     }
+    fclose(fp);
+    taos_close(taos);
+    return 0;
 }
-/*
-static bool isEmptyCommand(char *cmd) {
-    char *pchar = cmd;
+static int64_t dumpIn() {
+    assert(g_args.isDumpIn);
-    while (*pchar != '\0') {
-        if (*pchar != ' ') return false;
-        pchar++;
-    }
+    int64_t ret = 0;
+    if (dumpInDbs()) {
+        errorPrint("%s", "Failed to dump dbs in!\n");
+        exit(EXIT_FAILURE);
+    }
-    return true;
+    ret = dumpInSqlWorkThreads();
+
+#ifdef AVRO_SUPPORT
+    if (0 == ret) {
+        ret = dumpInAvroWorkThreads();
+    }
+#endif
+
+    return ret;
 }
-static void taosReplaceCtrlChar(char *str) {
-    bool ctrlOn = false;
-    char *pstr = NULL;
+static void *dumpNormalTablesOfStb(void *arg) {
+    threadInfo *pThreadInfo = (threadInfo *)arg;
-    for (pstr = str; *str != '\0'; ++str) {
-        if (ctrlOn) {
-            switch (*str) {
-                case 'n':
-                    *pstr = '\n';
-                    pstr++;
-                    break;
-                case 'r':
-                    *pstr = '\r';
-                    pstr++;
-                    break;
-                case 't':
-                    *pstr = '\t';
-                    pstr++;
-                    break;
-                case '\\':
-                    *pstr = '\\';
-                    pstr++;
break; - case '\'': - *pstr = '\''; - pstr++; - break; - default: - break; - } - ctrlOn = false; - } else { - if (*str == '\\') { - ctrlOn = true; - } else { - *pstr = *str; - pstr++; - } + debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from); + debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count); + + char command[COMMAND_SIZE]; + + sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"", + pThreadInfo->dbName, pThreadInfo->stbName, + pThreadInfo->count, pThreadInfo->from); + + TAOS_RES *res = taos_query(pThreadInfo->taos, command); + int32_t code = taos_errno(res); + if (code) { + errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n", + __func__, __LINE__, command, taos_errstr(res)); + taos_free_result(res); + return NULL; } - } - *pstr = '\0'; -} -*/ + FILE *fp = NULL; + char tmpBuf[MAX_PATH_LEN] = {0}; -char *ascii_literal_list[] = { - "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c", - "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19", - "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&", - "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3", - "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@", - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g", - "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", - "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81", - "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e", - "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b", - "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8", - "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5", - "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2", - "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf", - "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc", - "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9", - "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6", - "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"}; + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.%s.%d.sql", + g_args.outpath, + pThreadInfo->dbName, + pThreadInfo->stbName, + pThreadInfo->threadIndex); + } else { + sprintf(tmpBuf, "%s.%s.%d.sql", + pThreadInfo->dbName, + pThreadInfo->stbName, + pThreadInfo->threadIndex); + } -static int converStringToReadable(char *str, int size, char *buf, int bufsize) { - char *pstr = str; - char *pbuf = buf; - while (size > 0) { - if (*pstr == '\0') break; - pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]); - pstr++; - size--; + fp = fopen(tmpBuf, "w"); + + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + 
return NULL;
+    }
-    *pbuf = '\0';
-    return 0;
-}
-static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
-    char *pstr = str;
-    char *pbuf = buf;
-    wchar_t wc;
-    while (size > 0) {
-        if (*pstr == '\0') break;
-        int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
-        if (byte_width < 0) {
-            errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
-            exit(-1);
-        }
+    TAOS_ROW row = NULL;
+    int64_t i = 0;
+    int64_t count;
+    while((row = taos_fetch_row(res)) != NULL) {
+        debugPrint("[%d] sub table %"PRId64": name: %s\n",
+                pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
-        if ((int)wc < 256) {
-            pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+        count = dumpNormalTable(
+                pThreadInfo->taos,
+                pThreadInfo->dbName,
+                pThreadInfo->stbName,
+                (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+                pThreadInfo->precision,
+                fp);
+        if (count < 0) {
+            break;
         } else {
-            memcpy(pbuf, pstr, byte_width);
-            pbuf += byte_width;
+            atomic_add_fetch_64(&g_totalDumpOutRows, count);
         }
-        pstr += byte_width;
     }
-    *pbuf = '\0';
+    fclose(fp);
+    return NULL;
+}
+
+static int64_t dumpNtbOfDbByThreads(
+        SDbInfo *dbInfo,
+        int64_t ntbCount)
+{
+    if (ntbCount <= 0) {
+        return 0;
+    }
+
+    int threads = g_args.thread_num;
-    return 0;
-}
+    int64_t a = ntbCount / threads;
+    if (a < 1) {
+        threads = ntbCount;
+        a = 1;
+    }
-static void dumpCharset(FILE *fp) {
-    char charsetline[256];
+    assert(threads);
+    int64_t b = ntbCount % threads;
-    (void)fseek(fp, 0, SEEK_SET);
-    sprintf(charsetline, "#!%s\n", tsCharset);
-    (void)fwrite(charsetline, strlen(charsetline), 1, fp);
-}
+    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+    assert(pids);
+    assert(infos);
-static void loadFileCharset(FILE *fp, char *fcharset) {
-    char * line = NULL;
-    size_t line_size = 0;
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        pThreadInfo->taos = taos_connect(
+                g_args.host,
+                g_args.user,
+                g_args.password,
+                dbInfo->name,
+                g_args.port
+                );
+        if (NULL == pThreadInfo->taos) {
+            errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+                    __func__,
+                    __LINE__,
+                    taos_errstr(NULL));
+            free(pids);
+            free(infos);
-    (void)fseek(fp, 0, SEEK_SET);
-    ssize_t size = getline(&line, &line_size, fp);
-    if (size <= 2) {
-        goto _exit_no_charset;
+            return -1;
+        }
+
+        pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i < b) ? (a + 1) : a;
+        pThreadInfo->from = (i == 0) ? 0 :
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+        strcpy(pThreadInfo->dbName, dbInfo->name);
+        pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
+
+        pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
     }
-    if (strncmp(line, "#!", 2) != 0) {
-        goto _exit_no_charset;
+    for (int64_t i = 0; i < threads; i++) {
+        pthread_join(pids[i], NULL);
     }
-    if (line[size - 1] == '\n') {
-        line[size - 1] = '\0';
-        size--;
+
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        taos_close(pThreadInfo->taos);
     }
-    strcpy(fcharset, line + 2);
-    tfree(line);
-    return;
+    free(pids);
+    free(infos);
-_exit_no_charset:
-    (void)fseek(fp, 0, SEEK_SET);
-    *fcharset = '\0';
-    tfree(line);
-    return;
+    return 0;
 }
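The dumpCharset()/loadFileCharset() pair deleted above is re-added earlier in this patch; together they stamp and recover a `#!<charset>` first line in the dump file. A minimal, standalone round-trip sketch of that header convention (file name and buffer sizes are illustrative only, not taosdump code):

#include <stdio.h>
#include <string.h>

int main(void) {
    const char *charset = "UTF-8";
    FILE *fp = fopen("dump.sql", "w+");
    if (!fp) return 1;

    /* dump side: the header goes at offset 0, like dumpCharset() */
    fseek(fp, 0, SEEK_SET);
    fprintf(fp, "#!%s\n", charset);

    /* load side: mirror of loadFileCharset() */
    char line[64] = {0};
    fseek(fp, 0, SEEK_SET);
    if (fgets(line, sizeof(line), fp) && strncmp(line, "#!", 2) == 0) {
        line[strcspn(line, "\n")] = '\0';   /* strip the trailing newline */
        printf("charset: %s\n", line + 2);  /* -> UTF-8 */
    } else {
        fseek(fp, 0, SEEK_SET);             /* no header: rewind, treat as plain SQL */
    }
    fclose(fp);
    return 0;
}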
-// ======== dumpIn support multi threads functions ================================//
-
-static char **g_tsDumpInSqlFiles = NULL;
-static int32_t g_tsSqlFileNum = 0;
-static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
-static char g_tsCharset[64] = {0};
-
-static int taosGetFilesNum(const char *directoryName,
-        const char *prefix, const char *prefix2)
+static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
 {
-    char cmd[1024] = { 0 };
+    TAOS *taos = taos_connect(g_args.host,
+            g_args.user, g_args.password, dbInfo->name, g_args.port);
+    if (NULL == taos) {
+        errorPrint(
+                "Failed to connect to TDengine server %s by specified database %s\n",
+                g_args.host, dbInfo->name);
+        return 0;
+    }
-    if (prefix2)
-        sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ",
-                directoryName, prefix, directoryName, prefix2);
-    else
-        sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
+    char command[COMMAND_SIZE];
+    TAOS_RES *result;
+    int32_t code;
-    FILE *fp = popen(cmd, "r");
-    if (fp == NULL) {
-        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
-        exit(-1);
+    sprintf(command, "USE %s", dbInfo->name);
+    result = taos_query(taos, command);
+    code = taos_errno(result);
+    if (code != 0) {
+        errorPrint("invalid database %s, reason: %s\n",
+                dbInfo->name, taos_errstr(result));
+        taos_close(taos);
+        return 0;
     }
-    int fileNum = 0;
-    if (fscanf(fp, "%d", &fileNum) != 1) {
-        errorPrint("failed to execute:%s, parse result error\n", cmd);
-        exit(-1);
+    sprintf(command, "SHOW TABLES");
+    result = taos_query(taos, command);
+    code = taos_errno(result);
+    if (code != 0) {
+        errorPrint("Failed to show %s\'s tables, reason: %s\n",
+                dbInfo->name, taos_errstr(result));
+        taos_close(taos);
+        return 0;
     }
-    if (fileNum <= 0) {
-        errorPrint("directory:%s is empty\n", directoryName);
-        exit(-1);
+    g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
+    assert(g_tablesList);
+
+    TAOS_ROW row;
+    int64_t count = 0;
+    while(NULL != (row = taos_fetch_row(result))) {
+        debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
+                __func__, __LINE__,
+                count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+        tstrncpy(((TableInfo *)(g_tablesList + count))->name,
+                (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
+        char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
+        if (stbName) {
+            tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
+                    (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
+            ((TableInfo *)(g_tablesList + count))->belongStb = true;
+        }
+        count++;
     }
+    taos_close(taos);
+
+    int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
+
+    free(g_tablesList);
+    g_tablesList = NULL;
-    pclose(fp);
-    return fileNum;
+    return records;
 }
-static void taosParseDirectory(const char *directoryName,
-        const char *prefix, const char *prefix2,
-        char **fileArray, int totalFiles)
+static int64_t dumpNtbOfStbByThreads(
+        SDbInfo *dbInfo, char *stbName)
 {
-    char cmd[1024] = { 0 };
+    int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
-    if (prefix2) {
-        sprintf(cmd, "ls %s/*.%s %s/*.%s | sort",
-                directoryName, prefix, directoryName, prefix2);
-    } else {
-        sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+    if (ntbCount <= 0) {
+        return 0;
     }
-    FILE *fp = popen(cmd, "r");
-    if (fp == NULL) {
-        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
-        exit(-1);
-    }
+    int threads = g_args.thread_num;
-    int fileNum = 0;
-    while (fscanf(fp, "%128s", fileArray[fileNum++])) {
-        if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) {
-            fileNum--;
-        }
-        if (fileNum >= totalFiles) {
-            break;
-        }
+    int64_t a = ntbCount / threads;
+    if (a < 1) {
+        threads = ntbCount;
+        a = 1;
    }
-    if (fileNum != totalFiles) {
-        errorPrint("directory:%s changed while read\n", directoryName);
-        pclose(fp);
-        exit(-1);
-    }
+    assert(threads);
+    int64_t b = ntbCount % threads;
-    pclose(fp);
-}
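With `a` and `b` computed above, each worker thread receives a contiguous slice of the supertable's child tables, and dumpNormalTablesOfStb() (earlier in this patch) pages that slice out of the server with LIMIT/OFFSET. A standalone sketch of how one worker's statement would be assembled (database and table names are illustrative only):

#include <stdio.h>
#include <inttypes.h>

int main(void) {
    char command[1024];
    const char *db = "db", *stb = "stb";  /* hypothetical names */
    int64_t count = 3, from = 6;          /* this worker's slice */

    /* Each worker fetches only the child-table names it owns. */
    snprintf(command, sizeof(command),
             "SELECT TBNAME FROM %s.%s LIMIT %" PRId64 " OFFSET %" PRId64,
             db, stb, count, from);
    printf("%s\n", command);  /* the worker then dumps just those tables */
    return 0;
}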
+    pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+    threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+    assert(pids);
+    assert(infos);
-static void taosCheckDatabasesSQLFile(const char *directoryName)
-{
-    char cmd[1024] = { 0 };
-    sprintf(cmd, "ls %s/dbs.sql", directoryName);
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        pThreadInfo->taos = taos_connect(
+                g_args.host,
+                g_args.user,
+                g_args.password,
+                dbInfo->name,
+                g_args.port
+                );
+        if (NULL == pThreadInfo->taos) {
+            errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+                    __func__,
+                    __LINE__,
+                    taos_errstr(NULL));
+            free(pids);
+            free(infos);
-    FILE *fp = popen(cmd, "r");
-    if (fp == NULL) {
-        errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
-        exit(-1);
-    }
+            return -1;
+        }
-    while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
-        break;
-    }
+        pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i < b) ? (a + 1) : a;
+        pThreadInfo->from = (i == 0) ? 0 :
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+        strcpy(pThreadInfo->dbName, dbInfo->name);
+        pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-    pclose(fp);
-}
+        strcpy(pThreadInfo->stbName, stbName);
+        pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
+    }
-static void taosMallocDumpFiles()
-{
-    g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
-    for (int i = 0; i < g_tsSqlFileNum; i++) {
-        g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+    for (int64_t i = 0; i < threads; i++) {
+        pthread_join(pids[i], NULL);
     }
-}
-static void freeDumpFiles()
-{
-    for (int i = 0; i < g_tsSqlFileNum; i++) {
-        tfree(g_tsDumpInSqlFiles[i]);
+    int64_t records = 0;
+    for (int64_t i = 0; i < threads; i++) {
+        threadInfo *pThreadInfo = infos + i;
+        records += pThreadInfo->rowsOfDumpOut;
+        taos_close(pThreadInfo->taos);
     }
-    tfree(g_tsDumpInSqlFiles);
+
+    free(pids);
+    free(infos);
+
+    return records;
 }
-static void taosGetDirectoryFileList(char *inputDir)
+static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
 {
-    struct stat fileStat;
-    if (stat(inputDir, &fileStat) < 0) {
-        errorPrint("%s not exist\n", inputDir);
-        exit(-1);
-    }
+    dumpCreateDbClause(dbInfo, g_args.with_property, fp);
-    if (fileStat.st_mode & S_IFDIR) {
-        taosCheckDatabasesSQLFile(inputDir);
-        if (g_args.avro)
-            g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
-        else
-            g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
+    fprintf(g_fpOfResult, "\n#### database: %s\n",
+            dbInfo->name);
+    g_resultStatistics.totalDatabasesOfDumpOut++;
-        int tsSqlFileNumOfTbls = g_tsSqlFileNum;
-        if (g_tsDbSqlFile[0] != 0) {
-            tsSqlFileNumOfTbls--;
-        }
-        taosMallocDumpFiles();
-        if (0 != tsSqlFileNumOfTbls) {
-            if (g_args.avro) {
-                taosParseDirectory(inputDir, "sql", "avro",
-                        g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
-            } else {
-                taosParseDirectory(inputDir, "sql", NULL,
-                        g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
-            }
-        }
-        fprintf(stdout, "\nstart to dispose %d files in %s\n",
-                g_tsSqlFileNum, inputDir);
-    } else {
-        errorPrint("%s is not a directory\n", inputDir);
-        exit(-1);
-    }
-}
+    dumpCreateSTableClauseOfDb(dbInfo, fp);
-static FILE* taosOpenDumpInFile(char *fptr) {
-    wordexp_t full_path;
+    return dumpNTablesOfDb(dbInfo);
+}
-    if (wordexp(fptr, &full_path, 0) != 0) {
-        errorPrint("illegal file name: %s\n", fptr);
-        return NULL;
-    }
+static int dumpOut() {
+    TAOS *taos = NULL;
+    TAOS_RES *result = NULL;
-    char *fname = full_path.we_wordv[0];
+    TAOS_ROW row;
+    FILE *fp = NULL;
+    int32_t count = 0;
-    FILE *f =
NULL; - if ((fname) && (strlen(fname) > 0)) { - f = fopen(fname, "r"); - if (f == NULL) { - errorPrint("%s() LN%d, failed to open file %s\n", - __func__, __LINE__, fname); - } + char tmpBuf[MAX_PATH_LEN] = {0}; + if (g_args.outpath[0] != 0) { + sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath); + } else { + sprintf(tmpBuf, "dbs.sql"); } - wordfree(&full_path); - return f; -} + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + errorPrint("%s() LN%d, failed to open file %s\n", + __func__, __LINE__, tmpBuf); + return -1; + } -static int dumpInOneFile(TAOS* taos, FILE* fp, char* fcharset, - char* encode, char* fileName) { - int read_len = 0; - char * cmd = NULL; - size_t cmd_len = 0; - char * line = NULL; - size_t line_len = 0; + g_args.dumpDbCount = getDumpDbCount(); + debugPrint("%s() LN%d, dump db count: %d\n", + __func__, __LINE__, g_args.dumpDbCount); - cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN); - if (cmd == NULL) { - errorPrint("%s() LN%d, failed to allocate memory\n", - __func__, __LINE__); + if (0 == g_args.dumpDbCount) { + errorPrint("%d databases valid to dump\n", g_args.dumpDbCount); + fclose(fp); return -1; } - int lastRowsPrint = 5000000; - int lineNo = 0; - while ((read_len = getline(&line, &line_len, fp)) != -1) { - ++lineNo; - if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue; - line[--read_len] = '\0'; + g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *)); + if (g_dbInfos == NULL) { + errorPrint("%s() LN%d, failed to allocate memory\n", + __func__, __LINE__); + goto _exit_failure; + } - //if (read_len == 0 || isCommentLine(line)) { // line starts with # - if (read_len == 0 ) { - continue; - } + char command[COMMAND_SIZE]; - if (line[read_len - 1] == '\\') { - line[read_len - 1] = ' '; - memcpy(cmd + cmd_len, line, read_len); - cmd_len += read_len; - continue; - } + /* Connect to server */ + taos = taos_connect(g_args.host, g_args.user, g_args.password, + NULL, g_args.port); + if (taos == NULL) { + errorPrint("Failed to connect to TDengine server %s\n", g_args.host); + goto _exit_failure; + } - memcpy(cmd + cmd_len, line, read_len); - cmd[read_len + cmd_len]= '\0'; - if (queryDbImpl(taos, cmd)) { - errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n", - __func__, __LINE__, lineNo, fileName); - fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName); - } + /* --------------------------------- Main Code -------------------------------- */ + /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */ + /* */ + dumpCharset(fp); - memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); - cmd_len = 0; + sprintf(command, "show databases"); + result = taos_query(taos, command); + int32_t code = taos_errno(result); - if (lineNo >= lastRowsPrint) { - printf(" %d lines already be executed from file %s\n", lineNo, fileName); - lastRowsPrint += 5000000; - } + if (code != 0) { + errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n", + __func__, __LINE__, command, taos_errstr(result)); + goto _exit_failure; } - tfree(cmd); - tfree(line); - fclose(fp); - return 0; -} + TAOS_FIELD *fields = taos_fetch_fields(result); -static void* dumpInWorkThreadFp(void *arg) -{ - threadInfo *pThread = (threadInfo*)arg; - setThreadName("dumpInWorkThrd"); + while ((row = taos_fetch_row(result)) != NULL) { + // sys database name : 'log', but subsequent version changed to 'log' + if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) + && (!g_args.allow_sys)) { + continue; + } - for (int32_t f = 
0; f < g_tsSqlFileNum; ++f) { - if (f % pThread->totalThreads == pThread->threadIndex) { - char *SQLFileName = g_tsDumpInSqlFiles[f]; - FILE* fp = taosOpenDumpInFile(SQLFileName); - if (NULL == fp) { + if (g_args.databases) { // input multi dbs + if (inDatabasesSeq( + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) { continue; } - fprintf(stderr, ", Success Open input file: %s\n", - SQLFileName); - dumpInOneFile(pThread->taos, fp, g_tsCharset, g_args.encode, SQLFileName); + } else if (!g_args.all_databases) { // only input one db + if (strncasecmp(g_args.arg_list[0], + (char *)row[TSDB_SHOW_DB_NAME_INDEX], + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) + continue; } - } - return NULL; -} - -static void startDumpInWorkThreads() -{ - pthread_attr_t thattr; - threadInfo *pThread; - int32_t totalThreads = g_args.thread_num; - - if (totalThreads > g_tsSqlFileNum) { - totalThreads = g_tsSqlFileNum; - } + g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); + if (g_dbInfos[count] == NULL) { + errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n", + __func__, __LINE__, (uint64_t)sizeof(SDbInfo)); + goto _exit_failure; + } - threadInfo *threadObj = (threadInfo *)calloc( - totalThreads, sizeof(threadInfo)); + okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]); + tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], + min(TSDB_DB_NAME_LEN, + fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1)); + if (g_args.with_property) { + g_dbInfos[count]->ntables = + *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + g_dbInfos[count]->vgroups = + *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + g_dbInfos[count]->replica = + *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + g_dbInfos[count]->quorum = + *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); + g_dbInfos[count]->days = + *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - if (NULL == threadObj) { - errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__); - } + tstrncpy(g_dbInfos[count]->keeplist, + (char *)row[TSDB_SHOW_DB_KEEP_INDEX], + min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1)); + //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + //g_dbInfos[count]->daysToKeep1; + //g_dbInfos[count]->daysToKeep2; + g_dbInfos[count]->cache = + *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + g_dbInfos[count]->blocks = + *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + g_dbInfos[count]->minrows = + *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + g_dbInfos[count]->maxrows = + *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + g_dbInfos[count]->wallevel = + *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + g_dbInfos[count]->fsync = + *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + g_dbInfos[count]->comp = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + g_dbInfos[count]->cachelast = + (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - for (int32_t t = 0; t < totalThreads; ++t) { - pThread = threadObj + t; - pThread->threadIndex = t; - pThread->totalThreads = totalThreads; - pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (pThread->taos == NULL) { - errorPrint("Failed to connect to TDengine server %s\n", g_args.host); - free(threadObj); - return; + tstrncpy(g_dbInfos[count]->precision, + (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], + DB_PRECISION_LEN); + g_dbInfos[count]->update = + *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } - pthread_attr_init(&thattr); - pthread_attr_setdetachstate(&thattr, 
PTHREAD_CREATE_JOINABLE); + count++; - if (pthread_create(&(pThread->threadID), &thattr, - dumpInWorkThreadFp, (void*)pThread) != 0) { - errorPrint("%s() LN%d, thread:%d failed to start\n", - __func__, __LINE__, pThread->threadIndex); - exit(0); + if (g_args.databases) { + if (count > g_args.dumpDbCount) + break; + } else if (!g_args.all_databases) { + if (count >= 1) + break; } } - for (int t = 0; t < totalThreads; ++t) { - pthread_join(threadObj[t].threadID, NULL); - } - - for (int t = 0; t < totalThreads; ++t) { - taos_close(threadObj[t].taos); + if (count == 0) { + errorPrint("%d databases valid to dump\n", count); + goto _exit_failure; } - free(threadObj); -} - -static int dumpIn() { - assert(g_args.isDumpIn); - TAOS *taos = NULL; - FILE *fp = NULL; + if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases + for (int i = 0; i < count; i++) { + int64_t records = 0; + records = dumpWholeDatabase(g_dbInfos[i], fp); + if (records >= 0) { + okPrint("Database %s dumped\n", g_dbInfos[i]->name); + g_totalDumpOutRows += records; + } + } + } else { + if (1 == g_args.arg_list_len) { + int64_t records = dumpWholeDatabase(g_dbInfos[0], fp); + if (records >= 0) { + okPrint("Database %s dumped\n", g_dbInfos[0]->name); + g_totalDumpOutRows += records; + } + } else { + dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp); + } - taos = taos_connect( - g_args.host, g_args.user, g_args.password, - NULL, g_args.port); - if (taos == NULL) { - errorPrint("%s() LN%d, failed to connect to TDengine server\n", - __func__, __LINE__); - return -1; - } + int superTblCnt = 0 ; + for (int i = 1; g_args.arg_list[i]; i++) { + TableRecordInfo tableRecordInfo; - taosGetDirectoryFileList(g_args.inpath); + if (getTableRecordInfo(g_dbInfos[0]->name, + g_args.arg_list[i], + &tableRecordInfo) < 0) { + errorPrint("input the invalid table %s\n", + g_args.arg_list[i]); + continue; + } - int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum; - if (g_tsDbSqlFile[0] != 0) { - tsSqlFileNumOfTbls--; + int64_t records = 0; + if (tableRecordInfo.isStb) { // dump all table of this stable + int ret = dumpStableClasuse( + taos, + g_dbInfos[0], + tableRecordInfo.tableRecord.stable, + fp); + if (ret >= 0) { + superTblCnt++; + records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]); + } + } else if (tableRecordInfo.belongStb){ + dumpStableClasuse( + taos, + g_dbInfos[0], + tableRecordInfo.tableRecord.stable, + fp); + records = dumpNormalTableBelongStb( + taos, + g_dbInfos[0], + tableRecordInfo.tableRecord.stable, + g_args.arg_list[i]); + } else { + records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]); + } - fp = taosOpenDumpInFile(g_tsDbSqlFile); - if (NULL == fp) { - errorPrint("%s() LN%d, failed to open input file %s\n", - __func__, __LINE__, g_tsDbSqlFile); - return -1; + if (records >= 0) { + okPrint("table: %s dumped\n", g_args.arg_list[i]); + g_totalDumpOutRows += records; + } } - fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile); - - loadFileCharset(fp, g_tsCharset); - - dumpInOneFile(taos, fp, g_tsCharset, g_args.encode, - g_tsDbSqlFile); } taos_close(taos); - if (0 != tsSqlFileNumOfTbls) { - startDumpInWorkThreads(); - } - - freeDumpFiles(); + /* Close the handle and return */ + fclose(fp); + taos_free_result(result); + freeDbInfos(); + fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows); return 0; + +_exit_failure: + fclose(fp); + taos_close(taos); + taos_free_result(result); + freeDbInfos(); + errorPrint("dump 
out rows: %" PRId64 "\n", g_totalDumpOutRows); + return -1; } int main(int argc, char *argv[]) { @@ -2988,7 +3926,10 @@ int main(int argc, char *argv[]) { printf("databasesSeq: %s\n", g_args.databasesSeq); printf("schemaonly: %s\n", g_args.schemaonly?"true":"false"); printf("with_property: %s\n", g_args.with_property?"true":"false"); +#ifdef AVRO_SUPPORT printf("avro format: %s\n", g_args.avro?"true":"false"); + printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]); +#endif printf("start_time: %" PRId64 "\n", g_args.start_time); printf("human readable start time: %s \n", g_args.humanStartTime); printf("end_time: %" PRId64 "\n", g_args.end_time); @@ -3042,7 +3983,10 @@ int main(int argc, char *argv[]) { fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq); fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false"); fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false"); +#ifdef AVRO_SUPPORT fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false"); + fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]); +#endif fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time); fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime); fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time); @@ -3072,6 +4016,7 @@ int main(int argc, char *argv[]) { tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); if (dumpIn() < 0) { + errorPrint("%s\n", "dumpIn() failed!"); ret = -1; } } else { @@ -3103,4 +4048,3 @@ int main(int argc, char *argv[]) { return ret; } - diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt index 4cf444bab2f05816c1af55d96156334800d758d5..075525a0684b332405a23011b0f7f501658d911e 100644 --- a/src/plugins/CMakeLists.txt +++ b/src/plugins/CMakeLists.txt @@ -1,26 +1,6 @@ CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20) PROJECT(TDengine) -if(NOT WIN32) - string(ASCII 27 Esc) - set(ColourReset "${Esc}[m") - set(ColourBold "${Esc}[1m") - set(Red "${Esc}[31m") - set(Green "${Esc}[32m") - set(Yellow "${Esc}[33m") - set(Blue "${Esc}[34m") - set(Magenta "${Esc}[35m") - set(Cyan "${Esc}[36m") - set(White "${Esc}[37m") - set(BoldRed "${Esc}[1;31m") - set(BoldGreen "${Esc}[1;32m") - set(BoldYellow "${Esc}[1;33m") - set(BoldBlue "${Esc}[1;34m") - set(BoldMagenta "${Esc}[1;35m") - set(BoldCyan "${Esc}[1;36m") - set(BoldWhite "${Esc}[1;37m") -endif() - ADD_SUBDIRECTORY(monitor) IF (TD_BUILD_HTTP) @@ -30,35 +10,42 @@ IF (TD_BUILD_HTTP) ADD_SUBDIRECTORY(http) ELSE () MESSAGE("") - MESSAGE("${Green} use blm3 as httpd ${ColourReset}") + MESSAGE("${Green} use taosadapter as httpd ${ColourReset}") EXECUTE_PROCESS( - COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/blm3 + COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter ) EXECUTE_PROCESS( COMMAND git rev-parse --short HEAD RESULT_VARIABLE commit_sha1 - OUTPUT_VARIABLE blm3_commit_sha1 + OUTPUT_VARIABLE taosadapter_commit_sha1 ) - IF ("${blm3_commit_sha1}" STREQUAL "") - SET(blm3_commit_sha1 "unknown") + IF ("${taosadapter_commit_sha1}" STREQUAL "") + SET(taosadapter_commit_sha1 "unknown") ELSE () - STRING(SUBSTRING "${blm3_commit_sha1}" 0 7 blm3_commit_sha1) - STRING(STRIP "${blm3_commit_sha1}" blm3_commit_sha1) + STRING(SUBSTRING "${taosadapter_commit_sha1}" 0 7 taosadapter_commit_sha1) + STRING(STRIP "${taosadapter_commit_sha1}" taosadapter_commit_sha1) ENDIF () - MESSAGE("${Green} blm3 commit: ${blm3_commit_sha1} ${ColourReset}") + MESSAGE("${Green} taosadapter commit: 
${taosadapter_commit_sha1} ${ColourReset}") EXECUTE_PROCESS( COMMAND cd .. ) include(ExternalProject) - ExternalProject_Add(blm3 - PREFIX "blm3" - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/blm3 + ExternalProject_Add(taosadapter + PREFIX "taosadapter" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter BUILD_ALWAYS off DEPENDS taos BUILD_IN_SOURCE 1 - CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config" - BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}" - INSTALL_COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/ + CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" + INSTALL_COMMAND + COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 > /dev/null && ./upx taosadapter || : + COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/ ) ENDIF () diff --git a/src/plugins/blm3 b/src/plugins/blm3 deleted file mode 160000 index ba539ce69dc4fe53536e9b0517fe75917dce5c46..0000000000000000000000000000000000000000 --- a/src/plugins/blm3 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit ba539ce69dc4fe53536e9b0517fe75917dce5c46 diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index ccbcc985118b132369a1ee3895f4341e6cca6d59..f26a4b4c8bdda05f801075b70c1b762882adfd27 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -123,7 +123,7 @@ HttpContext *httpCreateContext(SOCKET fd) { TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext; HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext, - sizeof(TSDB_CACHE_PTR_TYPE), 3000); + sizeof(TSDB_CACHE_PTR_TYPE), tsHttpKeepAlive); pContext->ppContext = ppContext; httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext); diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter new file mode 160000 index 0000000000000000000000000000000000000000..6397bf5963f62f0aa5c4b9b961b16ed5c62579f1 --- /dev/null +++ b/src/plugins/taosadapter @@ -0,0 +1 @@ +Subproject commit 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1 diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index aabdc09612d8f805dd11586b72c69b3366f9fc0a..ed54723adeafdcd3cdff8b438d2f823a73a04a33 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -43,6 +43,8 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int #define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows) +#define RESET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:(((_r)->outputBuf)->info.rows = 0)) + #define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData? 
1 : 0) enum { @@ -353,16 +355,16 @@ enum OPERATOR_TYPE_E { typedef struct SOperatorInfo { uint8_t operatorType; - bool blockingOptr; // block operator or not - uint8_t status; // denote if current operator is completed - int32_t numOfOutput; // number of columns of the current operator results - char *name; // name, used to show the query execution plan - void *info; // extension attribution + bool blockingOptr; // block operator or not + uint8_t status; // denote if current operator is completed + int32_t numOfOutput; // number of columns of the current operator results + char *name; // name, used to show the query execution plan + void *info; // extension attribution SExprInfo *pExpr; SQueryRuntimeEnv *pRuntimeEnv; - struct SOperatorInfo **upstream; // upstream pointer list - int32_t numOfUpstream; // number of upstream. The value is always ONE expect for join operator + struct SOperatorInfo **upstream; // upstream pointer list + int32_t numOfUpstream; // number of upstream. The value is always ONE expect for join operator __operator_fn_t exec; __optr_cleanup_fn_t cleanup; } SOperatorInfo; @@ -393,6 +395,7 @@ typedef struct SQInfo { int32_t dataReady; // denote if query result is ready or not void* rspContext; // response context int64_t startExecTs; // start to exec timestamp + int64_t lastRetrieveTs; // last retrieve timestamp char* sql; // query sql string SQueryCostInfo summary; } SQInfo; diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index cef76bb6cc69a3c7781da948a4ef289602eb5aec..b0c601c5d86888b8c55d441315632b282e28fe25 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3178,7 +3178,14 @@ static void deriv_function(SQLFunctionCtx *pCtx) { default: qError("error input type"); } - + if (notNullElems > 0) { + for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { + SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; + if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { + aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); + } + } + } GET_RES_INFO(pCtx)->numOfRes += notNullElems; } @@ -3353,6 +3360,12 @@ static void diff_function(SQLFunctionCtx *pCtx) { */ assert(pCtx->hasNull); } else { + for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { + SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; + if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { + aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); + } + } int32_t forwardStep = (isFirstBlock) ? 
notNullElems - 1 : notNullElems; GET_RES_INFO(pCtx)->numOfRes += forwardStep; @@ -4727,8 +4740,6 @@ static void csum_function(SQLFunctionCtx *pCtx) { TSKEY* pTimestamp = pCtx->ptsOutputBuf; TSKEY* tsList = GET_TS_LIST(pCtx); - qDebug("%p csum_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull); - for (; i < pCtx->size && i >= 0; i += step) { char* pData = GET_INPUT_DATA(pCtx, i); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { @@ -4770,6 +4781,12 @@ static void csum_function(SQLFunctionCtx *pCtx) { if (notNullElems == 0) { assert(pCtx->hasNull); } else { + for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { + SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; + if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { + aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); + } + } GET_RES_INFO(pCtx)->numOfRes += notNullElems; GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } @@ -4843,6 +4860,12 @@ static void mavg_function(SQLFunctionCtx *pCtx) { if (notNullElems <= 0) { assert(pCtx->hasNull); } else { + for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) { + SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t]; + if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) { + aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx); + } + } GET_RES_INFO(pCtx)->numOfRes += notNullElems; GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG; } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 19fd5160b524512bdfd2ad3c825a4dd3f280e42a..0382dd4ee6e7d42d6a1e6060de76795b05a0b5a5 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -2448,7 +2448,7 @@ bool isQueryKilled(SQInfo *pQInfo) { // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived // abort current query execution. - if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs/1000) > getMaximumIdleDurationSec()) && + if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->lastRetrieveTs/1000) > getMaximumIdleDurationSec()) && (!needBuildResAfterQueryComplete(pQInfo))) { assert(pQInfo->startExecTs != 0); @@ -4231,7 +4231,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* int32_t start = 0; int32_t step = -1; - qDebug("QInfo:0x%"PRIx64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv)); + qDebug("QInfo:0x%"PRIx64" start to copy data from resultrowInfo to output buf", GET_QID(pRuntimeEnv)); assert(orderType == TSDB_ORDER_ASC || orderType == TSDB_ORDER_DESC); if (orderType == TSDB_ORDER_ASC) { @@ -4347,31 +4347,16 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data compSizes = tcalloc(numOfCols, sizeof(int32_t)); } - if (pQueryAttr->pExpr2 == NULL) { - for (int32_t col = 0; col < numOfCols; ++col) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); - if (compressed) { - compSizes[col] = compressQueryColData(pColRes, pRes->info.rows, data, compressed); - data += compSizes[col]; - *compLen += compSizes[col]; - compSizes[col] = htonl(compSizes[col]); - } else { - memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows); - data += pColRes->info.bytes * pRes->info.rows; - } - } - } else { - for (int32_t col = 0; col < numOfCols; ++col) { - SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); - if (compressed) { - compSizes[col] = htonl(compressQueryColData(pColRes, numOfRows, data, compressed)); - data += compSizes[col]; - *compLen += compSizes[col]; - compSizes[col] = htonl(compSizes[col]); - } else { - memmove(data, pColRes->pData, 
pColRes->info.bytes * numOfRows); - data += pColRes->info.bytes * numOfRows; - } + for (int32_t col = 0; col < numOfCols; ++col) { + SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col); + if (compressed) { + compSizes[col] = compressQueryColData(pColRes, numOfRows, data, compressed); + data += compSizes[col]; + *compLen += compSizes[col]; + compSizes[col] = htonl(compSizes[col]); + } else { + memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows); + data += pColRes->info.bytes * numOfRows; } } @@ -5235,7 +5220,6 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pInfo->reverseTimes = 0; pInfo->order = pRuntimeEnv->pQueryAttr->order.order; pInfo->current = 0; -// pInfo->prevGroupId = -1; SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); pOperator->name = "TableScanOperator"; diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 3b5f6a9d439f827da66cf829050b4e1d4440d69d..a150f3a717afaa0ddd79a33a9c8be5285c327574 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -448,7 +448,9 @@ int32_t tsDescOrder(const void* p1, const void* p2) { } } -void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { +void + +orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) { __compar_fn_t fn = NULL; if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) { fn = tsAscOrder; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index c6e6eddce7d8f56095d5d78f4d1f84ed1d4f3c97..fce7f649892f87d075c8dd64e4d1160e5d05bf77 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -272,8 +272,10 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { } *qId = pQInfo->qId; - if(pQInfo->startExecTs == 0) + if(pQInfo->startExecTs == 0) { pQInfo->startExecTs = taosGetTimestampMs(); + pQInfo->lastRetrieveTs = pQInfo->startExecTs; + } if (isQueryKilled(pQInfo)) { qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId); @@ -412,6 +414,9 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co setQueryStatus(pRuntimeEnv, QUERY_OVER); } + RESET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv)); + pQInfo->lastRetrieveTs = taosGetTimestampMs(); + if ((*pRsp)->compressed && compLen != 0) { int32_t numOfCols = pQueryAttr->pExpr2 ? 
pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput; int32_t origSize = pQueryAttr->resultRowSize * s; diff --git a/src/query/src/tdigest.c b/src/query/src/tdigest.c index 109fd7574f04a7f82e92f112551ca9494c7e667a..4870d1ff60d2cd7db69a01587d5e48515bdf67d7 100644 --- a/src/query/src/tdigest.c +++ b/src/query/src/tdigest.c @@ -296,7 +296,7 @@ double tdigestQuantile(TDigest *t, double q) { a = b; right = t->max; - if (idx < weight_so_far + a->weight) { + if (idx < weight_so_far + a->weight && a->weight != 0) { double p = (idx - weight_so_far) / a->weight; return left * (1 - p) + right * p; } diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index c86ab8549974712658ad3d381c4141427c000762..623d6e3cc0c20ef3c69b4ebfb6752616c1ff56b0 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -233,7 +233,7 @@ static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index) int32_t once = 0; // last WAL has once ever been processed int64_t offset = 0; uint64_t fversion = 0; - char fname[TSDB_FILENAME_LEN * 2] = {0}; // full path to wal file + char fname[TSDB_FILENAME_LEN * 3] = {0}; // full path to wal file // get full path to wal file snprintf(fname, sizeof(fname), "%s/%s", pNode->path, wname); diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 52e22dcce726c4e834ed88792053b839bf21ac0a..7ac37a72b44483ed2e3b25bbdbb40cd00c1958c8 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -229,7 +229,7 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) { SBlockIdx *pBlkIdx; size_t nidx = taosArrayGetSize(pIdxA); int tlen = 0, size; - int64_t offset; + int64_t offset = 0; if (nidx <= 0) { // All data are deleted @@ -1186,7 +1186,7 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDFile return -1; } - uint32_t aggrStatus = ((nColsNotAllNull > 0) && (rowsToWrite > 8)) ? 1 : 0; // TODO: How to make the decision? + uint32_t aggrStatus = nColsNotAllNull > 0 ? 
1 : 0; if (aggrStatus > 0) { taosCalcChecksumAppend(0, (uint8_t *)pAggrBlkData, tsizeAggr); diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 9ae73f9b92a5ef04c4b57c34ffa166e939b572e9..bf9206445926b8151861fc3366d8327a0077a87f 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -1544,7 +1544,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity, int16_t offset; bool isRow1DataRow = isDataRow(row1); - bool isRow2DataRow; + bool isRow2DataRow = false; bool isChosenRowDataRow; int32_t chosen_itr; void *value; @@ -3452,9 +3452,12 @@ void filterPrepare(void* expr, void* param) { int dummy = -1; SHashObj *pObj = NULL; if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) { - pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false); SArray *arr = (SArray *)(pCond->arr); - for (size_t i = 0; i < taosArrayGetSize(arr); i++) { + + size_t size = taosArrayGetSize(arr); + pObj = taosHashInit(size * 2, taosGetDefaultHashFunction(pInfo->sch.type), true, false); + + for (size_t i = 0; i < size; i++) { char* p = taosArrayGetP(arr, i); strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p)); taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy)); @@ -3462,12 +3465,14 @@ void filterPrepare(void* expr, void* param) { } else { buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen); } + pInfo->q = (char *)pObj; } else if (pCond != NULL) { uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE; if (size < (uint32_t)pSchema->bytes) { size = pSchema->bytes; } + // to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(wchar_t) space. pInfo->q = calloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); tVariantDump(pCond, pInfo->q, pSchema->type, true); @@ -3615,7 +3620,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC return pTableGroup; } -int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, +int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len, STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) { if (tsdbRLockRepoMeta(tsdb) < 0) goto _error; @@ -3677,19 +3682,19 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons } END_TRY void *filterInfo = NULL; - + ret = filterInitFromTree(expr, &filterInfo, 0); if (ret != TSDB_CODE_SUCCESS) { terrno = ret; goto _error; } - + tsdbQueryTableList(pTable, res, filterInfo); filterFreeInfo(filterInfo); tExprTreeDestroy(expr, NULL); - + pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res); pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey); @@ -3876,7 +3881,7 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) { static FORCE_INLINE int32_t tsdbGetTagDataFromId(void *param, int32_t id, void **data) { STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode *)param)); - + if (id == TSDB_TBNAME_COLUMN_INDEX) { *data = TABLE_NAME(pTable); } else { @@ -3909,7 +3914,7 @@ static void queryIndexedColumn(SSkipList* pSkipList, void* filterInfo, SArray* r iter = tSkipListCreateIterFromVal(pSkipList, startVal, pSkipList->type, TSDB_ORDER_DESC); FILTER_CLR_FLAG(order, TSDB_ORDER_DESC); } - + while (tSkipListIterNext(iter)) { SSkipListNode *pNode = tSkipListIterGet(iter); @@ -3918,7 +3923,7 @@ static void queryIndexedColumn(SSkipList* pSkipList, void* filterInfo, SArray* r filterSetColFieldData(filterInfo, pNode, 
tsdbGetTagDataFromId); all = filterExecute(filterInfo, 1, &addToResult, NULL, 0); } - + char *pData = SL_GET_NODE_DATA(pNode); tsdbDebug("filter index column, table:%s, result:%d", ((STable *)pData)->name->data, all); @@ -3950,7 +3955,7 @@ static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray* SSkipListNode *pNode = tSkipListIterGet(iter); filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId); - + char *pData = SL_GET_NODE_DATA(pNode); bool all = filterExecute(filterInfo, 1, &addToResult, NULL, 0); @@ -3958,7 +3963,7 @@ static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray* if (all || (addToResult && *addToResult)) { STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL}; taosArrayPush(res, &info); - } + } } tfree(addToResult); @@ -3971,9 +3976,9 @@ static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo STSchema* pTSSchema = pTable->tagSchema; bool indexQuery = false; SSkipList *pSkipList = pTable->pIndex; - + filterIsIndexedColumnQuery(filterInfo, pTSSchema->columns->colId, &indexQuery); - + if (indexQuery) { queryIndexedColumn(pSkipList, filterInfo, pRes); } else { diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 258a29b90b40f4a5a630c17328a927923e1f1be6..c52fbf208f6fbf0384ecf66650919c4d12ae352e 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -20,7 +20,7 @@ extern "C" { #endif -#define TSDB_CFG_MAX_NUM 128 +#define TSDB_CFG_MAX_NUM 130 #define TSDB_CFG_PRINT_LEN 23 #define TSDB_CFG_OPTION_LEN 24 #define TSDB_CFG_VALUE_LEN 41 diff --git a/src/util/src/talgo.c b/src/util/src/talgo.c index 54b7e00eb7dd6f31ac8c8e6afa89790846abac5b..352cd3c05e4d588900b676b605964e068c4ed191 100644 --- a/src/util/src/talgo.c +++ b/src/util/src/talgo.c @@ -230,7 +230,7 @@ void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const { int32_t parent; int32_t child; - char *buf; + char *buf = NULL; if (base && size > 0 && compar) { parent = start; diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index 0d335ca2664ffee75a79144b97181a5b625df66d..a2eea5aa7d99a43f2cf7f0552e843ce9a52034c0 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -566,7 +566,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen) int32_t end = 0; int32_t remainSize = 0; static int64_t lostLine = 0; - char tmpBuf[40] = {0}; + char tmpBuf[60] = {0}; int32_t tmpBufLen = 0; if (tLogBuff == NULL || tLogBuff->stop) return -1; diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index 6a37f11ecef376e70f4eefbf6446150bd350cf07..1ffa94b0df6b63dac914649c7003d37bbedbdb24 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -258,9 +258,9 @@ void taosCloseQset(taos_qset param) { pthread_mutex_unlock(&qset->mutex); pthread_mutex_destroy(&qset->mutex); + uTrace("qset:%p is closed", qset); tsem_destroy(&qset->sem); free(qset); - uTrace("qset:%p is closed", qset); } // tsem_post 'qset->sem', so that reader threads waiting for it diff --git a/src/util/src/tworker.c b/src/util/src/tworker.c index 8b4053bccd1ce8d9d3f58328d838f4ba5132a100..55604b417ee7d32dd174df01ef4f170923ddb327 100644 --- a/src/util/src/tworker.c +++ b/src/util/src/tworker.c @@ -91,6 +91,6 @@ void *tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle) { } void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue) { - taosCloseQueue(pQueue); uDebug("worker:%s, queue:%p is freed", pPool->name, pQueue); + taosCloseQueue(pQueue); } diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml 
b/tests/examples/JDBC/JDBCDemo/pom.xml index 8cf0356721f8ffd568e87fa4a77c86eb0f90a62b..5f0e35fa8f739453651ce3a7af092437531c00c6 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -17,7 +17,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.34 + 2.0.35 diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/BatchInsert.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/BatchInsert.java new file mode 100644 index 0000000000000000000000000000000000000000..a2566bd07e96ddd245c9b548a3192d0e4f343147 --- /dev/null +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/BatchInsert.java @@ -0,0 +1,87 @@ +package com.taosdata.example; + +import java.sql.*; +import java.util.*; + +public class BatchInsert { + + private static final String host = "127.0.0.1"; + private static final String user = "root"; + private static final String password = "taosdata"; + + + private static final String dbname = "test"; + private static final String stbname = "stb"; + private static final int tables= 100; + private static final int rows = 500; + private static final long ts = 1604877767000l; + + private Connection conn; + + private void init() { + // final String url = "jdbc:TAOS://" + host + ":6030/?user=" + user + "&password=" + password; + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=" + user + "&password=" + password; + + // get connection + try { + Properties properties = new Properties(); + properties.setProperty("charset", "UTF-8"); + properties.setProperty("locale", "en_US.UTF-8"); + properties.setProperty("timezone", "UTC-8"); + System.out.println("get connection starting..."); + conn = DriverManager.getConnection(url, properties); + if (conn != null){ + System.out.println("[ OK ] Connection established."); + } + + Statement stmt = conn.createStatement(); + + stmt.execute("drop database if exists " + dbname); + stmt.execute("create database if not exists " + dbname); + stmt.execute("use " + dbname); + stmt.execute("create table " + dbname + "." + stbname + "(ts timestamp, col int) tags(id int)"); + + } catch (SQLException e) { + e.printStackTrace(); + } + } + + private String generateSql() { + StringBuilder sb = new StringBuilder(); + Random rand = new Random(); + sb.append("insert into "); + for (int i = 0; i < tables; i++) { + sb.append(dbname + ".tb" + i + " using " + dbname + "." 
+ stbname + " tags(" + i + ") values"); + for (int j = 0; j < rows; j++) { + sb.append("("); + sb.append(ts + j); + sb.append(","); + sb.append(rand.nextInt(1000)); + sb.append(") "); + } + } + return sb.toString(); + } + + private void executeQuery(String sql) { + try (Statement stmt = conn.createStatement()) { + long start = System.currentTimeMillis(); + stmt.execute(sql); + long end = System.currentTimeMillis(); + + System.out.println("insert " + tables * rows + " records, cost " + (end - start)+ "ms"); + } catch (SQLException ex) { + ex.printStackTrace(); + } + } + + public static void main(String[] args) { + BatchInsert bi = new BatchInsert(); + + String sql = bi.generateSql(); + bi.init(); + bi.executeQuery(sql); + } + + +} diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml index 34518900ed30f48effd47a8786233080f3e5291f..81c549274c81ddc69d52508c46cd215edd8c5467 100644 --- a/tests/examples/JDBC/connectionPools/pom.xml +++ b/tests/examples/JDBC/connectionPools/pom.xml @@ -18,7 +18,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.18 + 2.0.34 diff --git a/tests/examples/JDBC/readme.md b/tests/examples/JDBC/readme.md index 9a017f4feab148cb7c3fd4132360c3075c6573cb..35dfb341d7d62bb283897523f928e04dabea962d 100644 --- a/tests/examples/JDBC/readme.md +++ b/tests/examples/JDBC/readme.md @@ -10,4 +10,4 @@ | 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces | -more detail: https://www.taosdata.com/cn//documentation20/connector-java/ \ No newline at end of file +more detail: https://www.taosdata.com/cn/documentation20/connector/java diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index f3751029f473399ccd52b8f10339f0979367a704..8af0c4642e7452cf835442b17e28d9d7c498bde0 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -27,7 +27,7 @@ python3 ./test.py -f insert/bug3654.py python3 ./test.py -f insert/insertDynamicColBeforeVal.py python3 ./test.py -f insert/in_function.py python3 ./test.py -f insert/modify_column.py -python3 ./test.py -f insert/line_insert.py +#python3 ./test.py -f insert/line_insert.py python3 ./test.py -f insert/specialSql.py # timezone @@ -416,9 +416,9 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py python3 ./test.py -f query/queryRegex.py python3 ./test.py -f tools/taosdemoTestdatatype.py -python3 ./test.py -f insert/schemalessInsert.py -python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py -python3 ./test.py -f insert/openTsdbJsonInsert.py +#python3 ./test.py -f insert/schemalessInsert.py +#python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py +#python3 ./test.py -f insert/openTsdbJsonInsert.py #======================p4-end=============== diff --git a/tests/pytest/functions/queryTestCases-td3690.py b/tests/pytest/functions/queryTestCases-td3690.py new file mode 100644 index 0000000000000000000000000000000000000000..12b8d9dc90f063bfff96fceb39641b5352c6ec11 --- /dev/null +++ b/tests/pytest/functions/queryTestCases-td3690.py @@ -0,0 +1,1588 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/debug/build/bin")]
+                    break
+        return buildPath
+
+    def getCfgDir(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+        else:
+            cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+        return cfgDir
+
+    def getCfgFile(self) -> str:
+        return self.getCfgDir()+"/taos.cfg"
+
+    def td3690(self):
+        tdLog.printNoPrefix("==========TD-3690==========")
+
+        tdSql.prepare()
+
+        tdSql.execute("show variables")
+        res_off = tdSql.cursor.fetchall()
+        resList = np.array(res_off)
+        index = np.where(resList == "offlineThreshold")
+        index_value = np.dstack((index[0])).squeeze()
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 864000)
+
+    def td4082(self):
+        tdLog.printNoPrefix("==========TD-4082==========")
+        tdSql.prepare()
+
+        cfgfile = self.getCfgFile()
+        max_compressMsgSize = 100000000
+
+        tdSql.execute("show variables")
+        res_com = tdSql.cursor.fetchall()
+        rescomlist = np.array(res_com)
+        cpms_index = np.where(rescomlist == "compressMsgSize")
+        index_value = np.dstack((cpms_index[0])).squeeze()
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 100000000)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+    def td4097(self):
+        tdLog.printNoPrefix("==========TD-4097==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("drop database if exists db1")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("create database if not exists db1 keep 3650")
+        tdSql.execute("create database if not exists new keep 3650")
+        tdSql.execute("create database if not exists private keep 3650")
+        tdSql.execute("create database if not exists db2 keep 3650")
+
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+        tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+        tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+        tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+        tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+        tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+        # tdLog.printNoPrefix("==========TD-4097==========")
+        # insert data, then run the "show create" statements
+
+        # p1: run "show create" without entering the target database
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db1")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db2")
+        tdSql.checkRows(1)
+        tdSql.query("show create database new")
+        tdSql.checkRows(1)
+        tdSql.query("show create database private")
+        tdSql.checkRows(1)
+        tdSql.error("show create database ")
+        tdSql.error("show create databases db ")
+        tdSql.error("show create database db.stb1")
+        tdSql.error("show create database db0")
+        tdSql.error("show create database db db1")
+        tdSql.error("show create database db, db1")
+        tdSql.error("show create database stb1")
+        tdSql.error("show create database * ")
+
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+        tdSql.error("show create stable db.t10")
+        tdSql.error("show create stable db.stb0")
+        tdSql.error("show create stable stb1")
+        tdSql.error("show create stable ")
+        tdSql.error("show create stable *")
+        tdSql.error("show create stable db.stb1 db.stb2")
+        tdSql.error("show create stable db.stb1, db.stb2")
+
+        tdSql.query("show create table db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db.t10")
+        tdSql.checkRows(1)
+        tdSql.error("show create table db.stb0")
+        tdSql.error("show create table stb1")
+        tdSql.error("show create table ")
+        tdSql.error("show create table *")
+        tdSql.error("show create table db.stb1 db.stb2")
+        tdSql.error("show create table db.stb1, db.stb2")
+
+        # p2: run "show create" after entering the target database
+        tdSql.execute("use db")
+
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db1")
+        tdSql.checkRows(1)
+        tdSql.error("show create database ")
+        tdSql.error("show create databases db ")
+        tdSql.error("show create database db.stb1")
+        tdSql.error("show create database db0")
+        tdSql.error("show create database db db1")
+        tdSql.error("show create database db, db1")
+        tdSql.error("show create database stb1")
+        tdSql.error("show create database * ")
+
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db1.stb3")
+        tdSql.checkRows(1)
+        tdSql.error("show create stable db.t10")
+        tdSql.error("show create stable db")
+        tdSql.error("show create stable t10")
+        tdSql.error("show create stable db.stb0")
+        tdSql.error("show create stables stb1")
+        tdSql.error("show create stable ")
+        tdSql.error("show create stable *")
+        tdSql.error("show create stable db.stb1 db.stb2")
+        tdSql.error("show create stable stb1 stb2")
+        tdSql.error("show create stable db.stb1, db.stb2")
+        tdSql.error("show create stable stb1, stb2")
+
+        tdSql.query("show create table db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db.t10")
+        tdSql.checkRows(1)
+        tdSql.query("show create table t10")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db1.t30")
+        tdSql.checkRows(1)
+        tdSql.error("show create table t30")
+        tdSql.error("show create table db.stb0")
+        tdSql.error("show create table db.t0")
+        tdSql.error("show create table db")
+        tdSql.error("show create tables stb1")
+        tdSql.error("show create tables t10")
+        tdSql.error("show create table ")
+        tdSql.error("show create table *")
+        tdSql.error("show create table db.stb1 db.stb2")
+        tdSql.error("show create table db.t11 db.t10")
+        tdSql.error("show create table db.stb1, db.stb2")
+        tdSql.error("show create table db.t11, db.t10")
+        tdSql.error("show create table stb1 stb2")
+        tdSql.error("show create table t11 t10")
+        tdSql.error("show create table stb1, stb2")
+        tdSql.error("show create table t11, t10")
+
+        # p3: query again after dropping tables and databases
+        tdSql.execute("drop table if exists t11")
+
+        tdSql.error("show create table t11")
+        tdSql.error("show create table db.t11")
+        tdSql.query("show create stable stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table t10")
+        tdSql.checkRows(1)
+
+        tdSql.execute("drop stable if exists stb2")
+
+        tdSql.error("show create table stb2")
+        tdSql.error("show create table db.stb2")
+        tdSql.error("show create stable stb2")
+        tdSql.error("show create stable db.stb2")
+        tdSql.error("show create stable db.t20")
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+
+        tdSql.execute("drop database if exists db1")
+        tdSql.error("show create database db1")
+        tdSql.error("show create stable db1.t31")
+        tdSql.error("show create stable db1.stb3")
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("drop database if exists db1")
+        tdSql.execute("drop database if exists new")
+        tdSql.execute("drop database if exists db2")
+        tdSql.execute("drop database if exists private")
+
+    def td4153(self):
+        tdLog.printNoPrefix("==========TD-4153==========")
+
+        pass
+
+    def td4288(self):
+        tdLog.printNoPrefix("==========TD-4288==========")
+        # keep ~ [days,365000]
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db")
+
+        tdSql.execute("show variables")
+        res_kp = tdSql.cursor.fetchall()
+        resList = np.array(res_kp)
+        keep_index = np.where(resList == "keep")
+        index_value = np.dstack((keep_index[0])).squeeze()
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 3650)
+
+        tdSql.query("show databases")
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "3650,3650,3650")
+        else:
+            tdSql.checkData(0, 7, 3650)
+
+        days = tdSql.getData(0, 6)
+        tdSql.error("alter database db keep 3650001")
+        tdSql.error("alter database db keep 9")
+        tdSql.error("alter database db keep 0b")
+        tdSql.error("alter database db keep 3650,9,36500")
+        tdSql.error("alter database db keep 3650,3650,365001")
+        tdSql.error("alter database db keep 36500,a,36500")
+        tdSql.error("alter database db keep (36500,3650,3650)")
+        tdSql.error("alter database db keep [36500,3650,36500]")
+        tdSql.error("alter database db keep 36500,0xff,3650")
+        tdSql.error("alter database db keep 36500,0o365,3650")
+        tdSql.error("alter database db keep 36500,0A3Ch,3650")
+        tdSql.error("alter database db keep")
+        tdSql.error("alter database db keep0 36500")
+
+        tdSql.execute("alter database db keep 36500")
+        tdSql.query("show databases")
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "36500,36500,36500")
+        else:
+            tdSql.checkData(0, 7, 36500)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db1")
+        tdSql.query("show databases")
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "3650,3650,3650")
+        else:
+            tdSql.checkData(0, 7, 3650)
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 3650)
+
+        tdSql.execute("alter database db1 keep 365")
+        tdSql.execute("drop database if exists db1")
+
+
+        pass
+
+    def td4724(self):
+        tdLog.printNoPrefix("==========TD-4724==========")
+        cfgfile = self.getCfgFile()
+        minTablesPerVnode = 5
+        maxTablesPerVnode = 10
+        maxVgroupsPerDb = 100
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+        min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+        max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        insert_sql = "insert into "
+        for i in range(100):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+        tdSql.query("show dnodes")
+        vnode_count = tdSql.getData(0, 2)
+        if vnode_count <= 1:
+            tdLog.exit("vnode count is less than 2")
+
+        tdSql.execute(insert_sql)
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+        pass
+
+    def td4889(self):
+        tdLog.printNoPrefix("==========TD-4889==========")
+        cfg = {
+            'minRowsPerFileBlock': '10',
+            'maxRowsPerFileBlock': '200',
+            'minRows': '10',
+            'maxRows': '200',
+            'maxVgroupsPerDb': '100',
+            'maxTablesPerVnode': '1200',
+        }
+        tdSql.query("show dnodes")
+        dnode_index = tdSql.getData(0,0)
+        tdDnodes.stop(dnode_index)
+        tdDnodes.deploy(dnode_index, cfg)
+        tdDnodes.start(dnode_index)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+        nowtime = int(round(time.time() * 1000))
+        for i in range(1000):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            sql = f"insert into db.t1{i} values"
+            for j in range(260):
+                sql += f"({nowtime-1000*i-j}, {i+j})"
+                # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+            tdSql.execute(sql)
+
+        # tdDnodes.stop(dnode_index)
+        # tdDnodes.start(dnode_index)
+
+        tdSql.query("show vgroups")
+        index = tdSql.getData(0,0)
+        tdSql.checkData(0, 6, 0)
+        tdSql.execute(f"compact vnodes in({index})")
+        start_time = time.time()
+        while True:
+            tdSql.query("show vgroups")
+            if tdSql.getData(0, 6) != 0:
+                tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+                break
+            run_time = time.time()-start_time
+            if run_time > 3:
+                tdLog.exit("compacting not occurred")
+            # time.sleep(0.1)
+
+        pass
+
+    def td5168insert(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+        tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+        for i in range(5):
+            c1 = 1001.11 + i*0.1
+            c2 = 1001.11 + i*0.1 + 1*0.01
+            c3 = 1001.11 + i*0.1 + 2*0.01
+            c4 = 1001.11 + i*0.1 + 3*0.01
+            tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+        # for i in range(1000000):
+        for i in range(10000):
+            random1 = random.uniform(1000,1001)
+            random2 = random.uniform(1000,1001)
+            random3 = random.uniform(1000,1001)
+            random4 = random.uniform(1000,1001)
+            tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+        pass
+
+    def td5168(self):
+        tdLog.printNoPrefix("==========TD-5168==========")
+        # insert random values within a small range
+        tdLog.printNoPrefix("=====step0: insert data with the default settings========")
+        self.td5168insert()
+
+        # take the values at five timestamps as the baseline; with compression off they must match exactly
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+            for j in range(4):
+                locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+                print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # tdSql.query("select * from db.t1 limit 100,1")
+        # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 1000,1")
+        # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 10000,1")
+        # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 100000,1")
+        # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 1000000,1")
+        # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+        # stop the service and record the data size with lossy compression disabled
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+
+        cfgdir = self.getCfgDir()
+        cfgfile = self.getCfgFile()
+
+        lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+        data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+        dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+        ###################################################
+        float_lossy = "float"
+        double_lossy = "double"
+        float_double_lossy = "float|double"
+        no_lossy = ""
+
+        double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+        _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+        lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+        lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+        lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+        lossy_no_cmd = f"sed -i '$a lossyColumns {no_lossy}' {cfgfile} "
+
+        ###################################################
+
+        # enable lossy compression for float, then start the service and insert data
+        tdLog.printNoPrefix("=====step1: set lossyColumns to float========")
+        lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+        tdDnodes.start(index)
+        self.td5168insert()
+
+        # query the five timestamps above and compare against the baseline values
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+            for j in range(4):
+                # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+                # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and record the data size with lossyColumns=float
+        tdDnodes.stop(index)
+        dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+        # switch lossy compression to double, then start the service
+        tdLog.printNoPrefix("=====step2: set lossyColumns to double========")
+        lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+        tdDnodes.start(index)
+        self.td5168insert()
+
+        # query the five timestamps above and compare against the baseline values
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            for j in range(4):
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and record the data size with lossyColumns=double
+        tdDnodes.stop(index)
+        dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+        # switch lossy compression to float&&double, then start the service
+        tdLog.printNoPrefix("=====step3: set lossyColumns to float&&double========")
+        lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+        tdDnodes.start(index)
+        self.td5168insert()
+
+        # query the five timestamps above and compare against the baseline values
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            for j in range(4):
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and record the data size with lossyColumns=float&&double
+        tdDnodes.stop(index)
+        dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+        if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+            tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.exit("lossy compression did not take effect")
+        else:
+            tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.printNoPrefix("lossy compression took effect")
+
+        pass
+
+    def td5433(self):
+        tdLog.printNoPrefix("==========TD-5433==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+        numtab=20000
+        for i in range(numtab):
+            sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+        tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+        tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+        tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+        tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+        tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+        tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 != 150")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 = 150")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(numtab)
+
+        tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 != 2")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1")
+        tdSql.checkRows(128)
+
+        tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 != 200")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 = 200")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2")
+        tdSql.checkRows(5)
+
+        tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 != 2")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2")
+        tdSql.checkRows(5)
+
+        pass
+
+    def td5798(self):
+        tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+        maxRemainderNum=7
+        tbnum=101
+        for i in range(tbnum-1):
+            sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+            tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+            tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+            tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+            tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+            tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+            tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+        tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+        tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+        tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+        tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+        tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+        tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+        tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+        tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+        tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+        tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+        #========== TD-5810 support distinct on multiple data columns ==========
+        tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum)
+        tdSql.query(f"select distinct c2 from stb1")
+        tdSql.checkRows(4)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum*3)
+        tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+        tdSql.checkRows(2)
+
+        tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(1)
+        tdSql.query(f"select distinct c2 from t1")
+        tdSql.checkRows(4)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c1 from t1 ")
+        tdSql.checkRows(2)
+        tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(1)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+        tdSql.checkRows(1)
+
+        tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+        tdSql.checkRows(2)
+
+        tdSql.error("select distinct c5 from stb1")
+        tdSql.error("select distinct c5 from t1")
+        tdSql.error("select distinct c1 from db.*")
+        tdSql.error("select c2, distinct c1 from stb1")
+        tdSql.error("select c2, distinct c1 from t1")
+        tdSql.error("select distinct c2 from ")
+        tdSql.error("distinct c2 from stb1")
+        tdSql.error("distinct c2 from t1")
+
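+        # Why the row counts above hold: every sub-table (t0..t99 and t100num)
+        # receives three data rows whose c2 cycles through three residues mod 3,
+        # plus one ts-only row whose columns stay null. "distinct c2" therefore
+        # returns the four values {0, 1, 2, null}, and "distinct c1, c2" returns
+        # three pairs per distinct c1 value, which is where the tbnum*3
+        # expectations come from.
+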
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1)
+        tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+        tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+        # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+        # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+        #========== TD-5798 support distinct on multiple tag columns ==========
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t1 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t1, t0 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t1, t2 from stb1")
+        tdSql.checkRows(maxRemainderNum*2+1)
+        tdSql.query("select distinct t0, t1, t2 from stb1")
+        tdSql.checkRows(maxRemainderNum*2+1)
+        tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t0, t0 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t1 from t1")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0, t1 from t100num")
+        tdSql.checkRows(1)
+
+        tdSql.query("select distinct t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t2, t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t3, t2 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t4, t2 from stb2")
+        tdSql.checkRows(maxRemainderNum*3+1)
+        tdSql.query("select distinct t2, t3, t4 from stb2")
+        tdSql.checkRows(maxRemainderNum*3+1)
+        tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t3, t3, t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t2, t3 from t01")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t3, t4 from t0100num")
+        tdSql.checkRows(1)
+
+
+        ########## should be error #########
+        tdSql.error("select distinct from stb1")
+        tdSql.error("select distinct t3 from stb1")
+        tdSql.error("select distinct t1 from db.*")
+        tdSql.error("select distinct t2 from ")
+        tdSql.error("distinct t2 from stb1")
+        tdSql.error("select distinct stb1")
+        tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+        tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+        tdSql.error("select dist t0 from stb1")
+        tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+        tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+        tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+        ########## add where condition ##########
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+        tdSql.checkRows(3)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+        tdSql.checkRows(2)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+        tdSql.checkRows(3)
+        tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+        tdSql.checkRows(1)
+        tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+        tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+        tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+        tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
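+        # Expected-count note: each sub-table holds six rows with real values
+        # (now-10h .. now-5h) plus three ts-only rows whose other columns are
+        # null. Null rows are skipped, so diff()/derivative() produce 6-1 = 5
+        # result rows, and top()/bottom() with k = 8 can return at most the 6
+        # non-null values, hence checkRows(6).
+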
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm = algo:0 + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p: bin/oct/hex + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27:mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with four operation + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44~45: with limit/offset after group by
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+
+        # negative tests
+        #
+        # table schema: ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+        #
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))  # no col, no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))                 # no col, with algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))   # no p, no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))                  # no p, with algorithm
+        tdSql.error("apercentile( c1, 100) from t1")                    # no select
+        tdSql.error("select apercentile from t1")                       # no arguments or parentheses
+        tdSql.error("select apercentile c1,0 from t1")                  # no brackets
+        tdSql.error("select apercentile (c1,0) t1")                     # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p, no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")             # no table_expr
+        tdSql.error("select apercentile{ (c1,0) } from t1")             # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ] from t1")             # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ] from t1")              # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")   # sql form error 5
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")   # sql form error 6
+        tdSql.error("select apercentile(c1, (0), 1) from t1")           # sql form error 7
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")   # sql form error 8
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")   # sql form error 9
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")   # sql form error 10
+        tdSql.error("select apercentile([c1, 0]) from t1")              # sql form error 11
+        tdSql.error("select apercentile({c1, 0}) from t1")              # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")                   # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")  # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")  # args: 4
+        tdSql.error("select apercentile() from t1")                     # args: null 1
+        tdSql.error("select apercentile from t1")                       # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")                # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))  # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))      # col: tag column
+        tdSql.error(self.apercentile_query_form(col=123))        # col: numerical
+        tdSql.error(self.apercentile_query_form(col=True))       # col: bool
+        tdSql.error(self.apercentile_query_form(col=''))         # col: ''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))  # col: expr
+        tdSql.error(self.apercentile_query_form(col="t%"))       # col: non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))       # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))       # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))       # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))      # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))         # p: bool
+        tdSql.error(self.apercentile_query_form(p='a'))          # p: str
+        tdSql.error(self.apercentile_query_form(p='last(*)'))    # p: expr
+        tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))  # p: timestamp
+        tdSql.error(self.apercentile_query_form(algo='t-digest'))      # algorithm: unquoted str
+        tdSql.error(self.apercentile_query_form(algo='"t_digest"'))    # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))   # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest."'))   # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))   # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))   # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='tdigest'))       # algorithm: unquoted str
+        tdSql.error(self.apercentile_query_form(algo=2.0))       # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=1.9999))    # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=-0.9999))   # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=-1.0))      # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=0b1))       # algorithm: binary literal
+        tdSql.error(self.apercentile_query_form(algo=0x1))       # algorithm: hex literal
+        tdSql.error(self.apercentile_query_form(algo=0o1))       # algorithm: octal literal
+        tdSql.error(self.apercentile_query_form(algo=True))      # algorithm: bool
+        tdSql.error(self.apercentile_query_form(algo="True"))    # algorithm: bool-like str
+        tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000'))  # algorithm: timestamp
+        tdSql.error(self.apercentile_query_form(algo='last(c1)'))  # algorithm: expr
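The negative cases above are written one statement per malformed form. The same coverage can be expressed table-driven, so that a new malformed form is a one-line addition; a minimal sketch of that shape (a hypothetical refactor around the same `tdSql` fixture and `apercentile_query_form` helper, not the test as committed):

```python
# Hypothetical table-driven reshaping of the invalid-argument checks above.
bad_p = [True, 'a', 'last(*)', "2021-08-01 00:00:00.000", -1, 100.1]
bad_algo = ['t-digest', '"t_digest"', 'tdigest', 2.0, -1.0, True, 'last(c1)']

for p in bad_p:
    tdSql.error(self.apercentile_query_form(p=p))        # every bad p must be rejected
for algo in bad_algo:
    tdSql.error(self.apercentile_query_form(algo=algo))  # every bad algo must be rejected
```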
+
+        # boundary test
+        tdSql.error(self.apercentile_query_form(p=-1))                    # p left of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=-9223372036854775809))  # p below bigint range
+        tdSql.error(self.apercentile_query_form(p=100.1))                 # p right of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=18446744073709551616))  # p above unsigned-bigint range
+        tdSql.error(self.apercentile_query_form(algo=-1))                 # algorithm left of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809))  # algorithm below bigint range
+        tdSql.error(self.apercentile_query_form(algo=2))                  # algorithm right of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=18446744073709551616))  # algorithm above unsigned-bigint range
+
+        # mix function test
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))     # mix with top function
+        tdSql.error(self.apercentile_query_form(alias=', bottom(c1,1)'))  # mix with bottom function
+        tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))  # mix with last_row function
+        tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))  # mix with distinct
+        tdSql.error(self.apercentile_query_form(alias=', *'))             # mix with *
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))      # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"'))  # mix with interp function
+        tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)'))  # mix with derivative function
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))      # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias='+ c1'))            # mix with arithmetic on a plain column
+
+    def apercentile_data(self, tbnum, data_row, basetime):
+        for i in range(tbnum):
+            for j in range(data_row):
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, 
-1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def td6108(self): + tdLog.printNoPrefix("==========TD-6108==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + tbnum = 10 + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + tdLog.printNoPrefix("######## no data test:") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data test:") + nowtime = int(round(time.time() * 1000)) + per_table_rows = 1000 + self.apercentile_data(tbnum, per_table_rows, nowtime) + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data with NULL test:") + tdSql.execute(f"insert into t1(ts) values ({nowtime-5})") + tdSql.execute(f"insert into t1(ts) values ({nowtime+5})") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + self.apercentile_query() + self.error_apercentile() + + + def run(self): + + # master branch + self.td3690() + + # self.td5168() + # self.td5433() + # self.td5798() + + # develop branch + # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now. + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/functions/queryTestCases-td4082.py b/tests/pytest/functions/queryTestCases-td4082.py new file mode 100644 index 0000000000000000000000000000000000000000..73f03530a4db222b199352ec582617db394f34dd --- /dev/null +++ b/tests/pytest/functions/queryTestCases-td4082.py @@ -0,0 +1,1586 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/debug/build/bin")]
+                    break
+        return buildPath
+
+    def getCfgDir(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+        else:
+            cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+        return cfgDir
+
+    def getCfgFile(self) -> str:
+        return self.getCfgDir()+"/taos.cfg"
+
+    def td3690(self):
+        tdLog.printNoPrefix("==========TD-3690==========")
+
+        tdSql.prepare()
+
+        tdSql.execute("show variables")
+        res_off = tdSql.cursor.fetchall()
+        resList = np.array(res_off)
+        index = np.where(resList == "offlineThreshold")
+        index_value = np.dstack((index[0])).squeeze()
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 864000)
+
+    def td4082(self):
+        tdLog.printNoPrefix("==========TD-4082==========")
+        tdSql.prepare()
+
+        cfgfile = self.getCfgFile()
+        max_compressMsgSize = 100000000
+
+        tdSql.execute("show variables")
+        res_com = tdSql.cursor.fetchall()
+        rescomlist = np.array(res_com)
+        cpms_index = np.where(rescomlist == "compressMsgSize")
+        index_value = np.dstack((cpms_index[0])).squeeze()
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 100000000)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
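td4082 above, and td4724/td5168 below, repeat the same dance: stop the dnode, append or delete a line in taos.cfg via sed, restart, then verify the effect through `show variables`. A small helper pair expressing that pattern (a sketch only; `append_cfg` and `drop_last_cfg_line` are hypothetical names, not part of the util framework):

```python
import subprocess

def append_cfg(cfgfile: str, key: str, value) -> None:
    # append "key value" as the last line of taos.cfg; the next taosd restart picks it up
    subprocess.check_output(f"sed -i '$a {key} {value}' {cfgfile}", shell=True)

def drop_last_cfg_line(cfgfile: str) -> None:
    # undo append_cfg by deleting the last line of the file
    subprocess.check_output(f"sed -i '$d' {cfgfile}", shell=True)
```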
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
+
+    def td4724(self):
+        tdLog.printNoPrefix("==========TD-4724==========")
+        cfgfile = self.getCfgFile()
+        minTablesPerVnode = 5
+        maxTablesPerVnode = 10
+        maxVgroupsPerDb = 100
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+        min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+        max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        insert_sql = "insert into "
+        for i in range(100):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+        tdSql.query("show dnodes")
+        vnode_count = tdSql.getData(0, 2)
+        if vnode_count <= 1:
+            tdLog.exit("vnode count is less than 2")
+
+        tdSql.execute(insert_sql)
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+        pass
+
+    def td4889(self):
+        tdLog.printNoPrefix("==========TD-4889==========")
+        cfg = {
+            'minRowsPerFileBlock': '10',
+            'maxRowsPerFileBlock': '200',
+            'minRows': '10',
+            'maxRows': '200',
+            'maxVgroupsPerDb': '100',
+            'maxTablesPerVnode': '1200',
+        }
+        tdSql.query("show dnodes")
+        dnode_index = tdSql.getData(0,0)
+        tdDnodes.stop(dnode_index)
+        tdDnodes.deploy(dnode_index, cfg)
+        tdDnodes.start(dnode_index)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+        nowtime = int(round(time.time() * 1000))
+        for i in range(1000):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            sql = f"insert into db.t1{i} values"
+            for j in range(260):
+                sql += f"({nowtime-1000*i-j}, {i+j})"
+            # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+            tdSql.execute(sql)
+
+        # tdDnodes.stop(dnode_index)
+        # tdDnodes.start(dnode_index)
+
+        tdSql.query("show vgroups")
+        index = tdSql.getData(0,0)
+        tdSql.checkData(0, 6, 0)
+        tdSql.execute(f"compact vnodes in({index})")
+        start_time = time.time()
+        while True:
+            tdSql.query("show vgroups")
+            if tdSql.getData(0, 6) != 0:
+                tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+                break
+            run_time = time.time()-start_time
+            if run_time > 3:
+                tdLog.exit("compacting not occurred")
+            # time.sleep(0.1)
+
+        pass
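The polling loop at the end of td4889 (re-query `show vgroups` until the compacting flag flips, give up after 3 seconds) is a general wait-until-condition idiom; a reusable sketch of it (a hypothetical `wait_until` helper, not part of the framework):

```python
import time

def wait_until(predicate, timeout=3.0, interval=0.1):
    # re-evaluate predicate until it is truthy or the timeout elapses
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# usage sketch: if not wait_until(compacting_started): tdLog.exit("compacting not occurred")
```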
+
+    def td5168insert(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+        tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+        for i in range(5):
+            c1 = 1001.11 + i*0.1
+            c2 = 1001.11 + i*0.1 + 1*0.01
+            c3 = 1001.11 + i*0.1 + 2*0.01
+            c4 = 1001.11 + i*0.1 + 3*0.01
+            tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+        # for i in range(1000000):
+        for i in range(10000):
+            random1 = random.uniform(1000,1001)
+            random2 = random.uniform(1000,1001)
+            random3 = random.uniform(1000,1001)
+            random4 = random.uniform(1000,1001)
+            tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+        pass
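td5168 below keeps its per-cell baselines in `locals()["f" + str(j) + str(i)]`, which works but is easy to get wrong; the same bookkeeping fits a plain dict keyed by (row, column). A behavior-preserving sketch (a hypothetical refactor, not the committed code):

```python
# Capture the baselines once, in a dict instead of synthesized local names.
baseline = {}
for i in range(5):
    tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
    for j in range(4):
        baseline[(i, j)] = tdSql.getData(0, j + 1)
# the later comparison passes would then read: tdSql.checkData(0, j + 1, baseline[(i, j)])
```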
"double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns 
value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.exit("压缩未生效") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.printNoPrefix("压缩生效") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=20000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts 
timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + + #========== TD-5810 suport distinct multi-data-coloumn ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + 
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1)
+        tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+        tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+        # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+        # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+        #========== TD-5798 support distinct multi-tag-column ==========
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t1 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t1, t0 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t1, t2 from stb1")
+        tdSql.checkRows(maxRemainderNum*2+1)
+        tdSql.query("select distinct t0, t1, t2 from stb1")
+        tdSql.checkRows(maxRemainderNum*2+1)
+        tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t0, t0 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t1 from t1")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0, t1 from t100num")
+        tdSql.checkRows(1)
+
+        tdSql.query("select distinct t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t2, t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t3, t2 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t4, t2 from stb2")
+        tdSql.checkRows(maxRemainderNum*3+1)
+        tdSql.query("select distinct t2, t3, t4 from stb2")
+        tdSql.checkRows(maxRemainderNum*3+1)
+        tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t3, t3, t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t2, t3 from t01")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t3, t4 from t0100num")
+        tdSql.checkRows(1)
+
+
+        ########## should be error #########
+        tdSql.error("select distinct from stb1")
+        tdSql.error("select distinct t3 from stb1")
+        tdSql.error("select distinct t1 from db.*")
+        tdSql.error("select distinct t2 from ")
+        tdSql.error("distinct t2 from stb1")
+        tdSql.error("select distinct stb1")
+        tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+        tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+        tdSql.error("select dist t0 from stb1")
+        tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+        tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+        tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+        ########## add where condition ##########
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+        tdSql.checkRows(3)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+        tdSql.checkRows(2)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+        tdSql.checkRows(3)
+        tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+        tdSql.checkRows(1)
+        tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+        tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+        tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.5") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.5") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.5") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.5") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.5") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.5") + + + def apercentile_query(self): + + # table schema: ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nested query, outer query + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nested query, inner and outer query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nested query, inner query + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: algorithm argument omitted (server default) + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p as binary/hex literals; algorithm name is case-insensitive + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate functions + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27: mix with selector functions + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing functions + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with an arithmetic expression + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1', 'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1', 'condition':'order by ts'} + self.checkapert(**case42) + case43 = {'table_expr':'t1', 'condition':'order by ts'} + self.checkapert(**case43) + + # case44: with limit offset + case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'} + self.checkapert(**case44) + case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'} + self.checkapert(**case45) + + pass + + def error_apercentile(self): + + # negative tests + # + # table schema: ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col, no algorithm + tdSql.error(self.apercentile_query_form(col="")) # no col, algorithm + tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p, no algorithm + tdSql.error(self.apercentile_query_form(p='')) # no p, algorithm + tdSql.error("apercentile( c1, 100) from t1") # no select + tdSql.error("select apercentile from t1") # no argument list + tdSql.error("select apercentile c1,0 from t1") # no brackets + tdSql.error("select apercentile (c1,0) t1") # no from + tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p, no algorithm + tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr + tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1 + tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2 + tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3 + tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 4 + tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 5 + tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 6 + tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 7 + tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 8 + tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 9 + tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 10 + tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 11 + tdSql.error("select apercentile(c1) from t1") # args: 1 + tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # args: 4 + tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # args: 4 + tdSql.error("select apercentile() from t1") # args: null 1 + tdSql.error("select apercentile from t1") # args: null 2 + tdSql.error("select apercentile( , , ) from t1") # args: null 3 + tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # args: null 4 + tdSql.error(self.apercentile_query_form(col="st1")) # col: tag column + tdSql.error(self.apercentile_query_form(col=123)) # col: numeric literal + tdSql.error(self.apercentile_query_form(col=True)) # col: bool + tdSql.error(self.apercentile_query_form(col='')) # col: '' + tdSql.error(self.apercentile_query_form(col="last(c1)")) # col: expr + tdSql.error(self.apercentile_query_form(col="t%")) # col: non-numerical + tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp + tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary + tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool + tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar + tdSql.error(self.apercentile_query_form(p=True)) # p: bool + tdSql.error(self.apercentile_query_form(p='a')) # p: str + 
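# for reference, a sketch of what the helper renders with its defaults: + #   self.apercentile_query_form()      -> "select apercentile(c1, 0, 't-digest')  from t1 " + #   self.apercentile_query_form(p=50)  -> "select apercentile(c1, 50, 't-digest')  from t1 " + # each tdSql.error() call here submits one malformed variant of that statement + 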
tdSql.error(self.apercentile_query_form(p='last(*)')) # p: expr + tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p: timestamp + tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm: unquoted string + tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm: invalid name + tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm: invalid name + tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm: invalid name + tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm: invalid name + tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm: invalid name + tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm: unquoted string + tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm: float + tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm: float + tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm: float + tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm: float + tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm: int (binary literal) + tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm: int (hex literal) + tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm: int (octal literal) + tdSql.error(self.apercentile_query_form(algo=True)) # algorithm: bool + tdSql.error(self.apercentile_query_form(algo="True")) # algorithm: the string "True" + tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm: timestamp + tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm: expr + + # boundary test + tdSql.error(self.apercentile_query_form(p=-1)) # p below [0, 100] + tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p below signed-bigint range + tdSql.error(self.apercentile_query_form(p=100.1)) # p above [0, 100] + tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p above unsigned-bigint range + tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm below [0, 1] + tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm below signed-bigint range + tdSql.error(self.apercentile_query_form(algo=2)) # algorithm above [0, 1] + tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm above unsigned-bigint range + + # mixed function tests + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function + tdSql.error(self.apercentile_query_form(alias=', bottom(c1,1)')) # mix with bottom function + tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function + tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function + tdSql.error(self.apercentile_query_form(alias=', *')) # mix with * + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function + tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function + tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with arithmetic expression + + def apercentile_data(self, tbnum, data_row, basetime): + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(-200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, 
-1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def td6108(self): + tdLog.printNoPrefix("==========TD-6108==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + tbnum = 10 + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + tdLog.printNoPrefix("######## no data test:") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data test:") + nowtime = int(round(time.time() * 1000)) + per_table_rows = 1000 + self.apercentile_data(tbnum, per_table_rows, nowtime) + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data with NULL test:") + tdSql.execute(f"insert into t1(ts) values ({nowtime-5})") + tdSql.execute(f"insert into t1(ts) values ({nowtime+5})") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + self.apercentile_query() + self.error_apercentile() + + + def run(self): + + self.td4082() + # self.td5168() + # self.td5433() + # self.td5798() + + # develop branch + # self.td4889()  # scenario: vnode/wal/wal* files exist but the vnode has no meta/data; the status is currently reset to 0 + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/functions/queryTestCases-td4097.py b/tests/pytest/functions/queryTestCases-td4097.py new file mode 100644 index 0000000000000000000000000000000000000000..99c5f569825f631c9401a8db9994263834b30389 --- /dev/null +++ b/tests/pytest/functions/queryTestCases-td4097.py @@ -0,0 +1,1587 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +import subprocess +import random +import math +import numpy as np +import inspect + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg" + else: + cfgDir = self.getBuildPath() + "/sim/dnode1/cfg" + return cfgDir + + def getCfgFile(self) -> str: + return self.getCfgDir()+"/taos.cfg" + + def td3690(self): + tdLog.printNoPrefix("==========TD-3690==========") + + tdSql.prepare() + + tdSql.execute("show variables") + res_off = tdSql.cursor.fetchall() + resList = np.array(res_off) + # locate the offlineThreshold row in the "show variables" output + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 864000) + + def td4082(self): + tdLog.printNoPrefix("==========TD-4082==========") + tdSql.prepare() + + cfgfile = self.getCfgFile() + max_compressMsgSize = 100000000 + + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "compressMsgSize") + index_value = np.dstack((cpms_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 100000000) + + tdDnodes.stop(index) + # an out-of-range value should fall back to the default (-1) + cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + def td4097(self): + tdLog.printNoPrefix("==========TD-4097==========") + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + 
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
tdSql.checkData(0, 7, 36500) + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db1") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.execute("alter database db1 keep 365") + tdSql.execute("drop database if exists db1") + + + pass + + def td4724(self): + tdLog.printNoPrefix("==========TD-4724==========") + cfgfile = self.getCfgFile() + minTablesPerVnode = 5 + maxTablesPerVnode = 10 + maxVgroupsPerDb = 100 + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} " + min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} " + max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} " + try: + _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + insert_sql = "insert into " + for i in range(100): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})" + tdSql.query("show dnodes") + vnode_count = tdSql.getData(0, 2) + if vnode_count <= 1: + tdLog.exit("vnode count is less than 2") + + tdSql.execute(insert_sql) + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + pass + + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + cfg = { + 'minRowsPerFileBlock': '10', + 'maxRowsPerFileBlock': '200', + 'minRows': '10', + 'maxRows': '200', + 'maxVgroupsPerDb': '100', + 'maxTablesPerVnode': '1200', + } + tdSql.query("show dnodes") + dnode_index = tdSql.getData(0,0) + tdDnodes.stop(dnode_index) + tdDnodes.deploy(dnode_index, cfg) + tdDnodes.start(dnode_index) + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + nowtime = int(round(time.time() * 1000)) + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + sql = f"insert into db.t1{i} values" + for j in range(260): + sql += f"({nowtime-1000*i-j}, {i+j})" + # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + tdSql.execute(sql) + + # tdDnodes.stop(dnode_index) + # tdDnodes.start(dnode_index) + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + start_time = time.time() + while True: + tdSql.query("show vgroups") + if tdSql.getData(0, 6) != 0: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + break + run_time = time.time()-start_time + if run_time > 3: + tdLog.exit("compacting did not occur") + # 
time.sleep(0.1) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(10000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # insert random values within a small range + tdLog.printNoPrefix("=====step0: insert data with the default config========") + self.td5168insert() + + # take the rows at the five timestamps as baselines; without compression they must match exactly + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # stash the baseline values as f{j}{i} via locals() + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # stop the service and record the data size with lossy compression disabled + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"lossyColumns disabled, data size is: {dsize_init}; the lossyColumns line is: {lossy_args}") + + ################################################### + float_lossy = "float" + double_lossy = 
"double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns 
value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is disabled, data size is: {dsize_init}") + tdLog.exit("lossy compression did not take effect") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is disabled, data size is: {dsize_init}") + tdLog.printNoPrefix("lossy compression took effect") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=20000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts 
timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + + #========== TD-5810 support distinct on multiple data columns ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + 
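# row-count rationale: each c1=i co-occurs with the three distinct c2 values i%3, (i-1)%3 and (i-2)%3, so "distinct c1,c2" over c1<tbnum yields tbnum*3 rows, while "distinct c2" alone yields 4 (0, 1, 2 and the null from the ts-only rows) + 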
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 support distinct on multiple tag columns ========== + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
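+        # note: each table above received 9 rows, but only 6 of them carry
+        # non-null data columns, so bottom()/top() with k=8 can return at most
+        # 6 rows; c3 (timestamp), c4 (binary) and c6 (bool) are non-numeric,
+        # hence the error checks that follow.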
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm = algo:0 + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p: bin/oct/hex + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27:mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with four operation + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44: with limit offset
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+
+        # unusual test
+        #
+        # table schema: ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool,
+        #               c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+        #
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))  # no col, no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))  # no col, with algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))  # no p, no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))  # no p, with algorithm
+        tdSql.error("apercentile( c1, 100) from t1")  # no select
+        tdSql.error("select apercentile from t1")  # no parentheses or arguments
+        tdSql.error("select apercentile c1,0 from t1")  # no brackets
+        tdSql.error("select apercentile (c1,0) t1")  # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p, no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")  # col and p wrapped in a single bracket
+        tdSql.error("select apercentile{ (c1,0) } from t1")  # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ] from t1")  # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ] from t1")  # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")  # sql form error 4
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")  # sql form error 5
+        tdSql.error("select apercentile(c1, (0), 1) from t1")  # sql form error 6
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")  # sql form error 7
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")  # sql form error 8
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")  # sql form error 9
+        tdSql.error("select apercentile([c1, 0]) from t1")  # sql form error 10
+        tdSql.error("select apercentile({c1, 0}) from t1")  # sql form error 11
+        tdSql.error("select apercentile(c1) from t1")  # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")  # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")  # args: 4
+        tdSql.error("select apercentile() from t1")  # args: null 1
+        tdSql.error("select apercentile from t1")  # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")  # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))  # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))  # col: tag column
+        tdSql.error(self.apercentile_query_form(col=123))  # col: numerical
+        tdSql.error(self.apercentile_query_form(col=True))  # col: bool
+        tdSql.error(self.apercentile_query_form(col=''))  # col: ''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))  # col: expr
+        tdSql.error(self.apercentile_query_form(col="t%"))  # col: non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))  # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))  # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))  # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))  # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))  # p: bool
+        tdSql.error(self.apercentile_query_form(p='a'))  # p: str
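+        # p must be a plain numeric literal within [0, 100]: booleans, strings,
+        # expressions and timestamps in the p position are rejected below.
+        # for reference, with the default arguments apercentile_query_form()
+        # renders "select apercentile(c1, 0, 't-digest')  from t1 ".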
+        tdSql.error(self.apercentile_query_form(p='last(*)'))  # p: expr
+        tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))  # p: timestamp
+        tdSql.error(self.apercentile_query_form(algo='t-digest'))  # algorithm: unquoted identifier
+        tdSql.error(self.apercentile_query_form(algo='"t_digest"'))  # algorithm: invalid name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))  # algorithm: invalid name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest."'))  # algorithm: invalid name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))  # algorithm: invalid name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))  # algorithm: invalid name
+        tdSql.error(self.apercentile_query_form(algo='tdigest'))  # algorithm: unquoted identifier
+        tdSql.error(self.apercentile_query_form(algo=2.0))  # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=1.9999))  # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=-0.9999))  # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=-1.0))  # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=0b1))  # algorithm: int (binary literal)
+        tdSql.error(self.apercentile_query_form(algo=0x1))  # algorithm: int (hex literal)
+        tdSql.error(self.apercentile_query_form(algo=0o1))  # algorithm: int (octal literal)
+        tdSql.error(self.apercentile_query_form(algo=True))  # algorithm: bool
+        tdSql.error(self.apercentile_query_form(algo="True"))  # algorithm: bool-like string
+        tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000'))  # algorithm: timestamp
+        tdSql.error(self.apercentile_query_form(algo='last(c1)'))  # algorithm: expr
+
+        # boundary test
+        tdSql.error(self.apercentile_query_form(p=-1))  # p below [0, 100]
+        tdSql.error(self.apercentile_query_form(p=-9223372036854775809))  # p underflows bigint
+        tdSql.error(self.apercentile_query_form(p=100.1))  # p above [0, 100]
+        tdSql.error(self.apercentile_query_form(p=18446744073709551616))  # p overflows unsigned bigint
+        tdSql.error(self.apercentile_query_form(algo=-1))  # algorithm below [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809))  # algorithm underflows bigint
+        tdSql.error(self.apercentile_query_form(algo=2))  # algorithm above [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=18446744073709551616))  # algorithm overflows unsigned bigint
+
+        # mix function test
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))  # mix with top function
+        tdSql.error(self.apercentile_query_form(alias=', bottom(c1,1)'))  # mix with bottom function
+        tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))  # mix with last_row function
+        tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))  # mix with distinct
+        tdSql.error(self.apercentile_query_form(alias=', *'))  # mix with *
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))  # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"'))  # mix with interp function
+        tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)'))  # mix with derivative function
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))  # mix with diff function (duplicate check)
+        tdSql.error(self.apercentile_query_form(alias='+ c1)'))  # mix with arithmetic expression
+
+    def apercentile_data(self, tbnum, data_row, basetime):
+        for i in range(tbnum):
+            for j in range(data_row):
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(-200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
+    def td6108(self):
+        tdLog.printNoPrefix("==========TD-6108==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+            ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+            c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+            ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        tbnum = 10
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        tdLog.printNoPrefix("######## no data test:")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data test:")
+        nowtime = int(round(time.time() * 1000))
+        per_table_rows = 1000
+        self.apercentile_data(tbnum, per_table_rows, nowtime)
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data with NULL test:")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## check after WAL test:")
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        self.apercentile_query()
+        self.error_apercentile()
+
+
+    def run(self):
+
+        self.td4097()
+
+        # self.td5168()
+        # self.td5433()
+        # self.td5798()
+
+        # develop branch
+        # self.td4889()  # in the scenario with vnode/wal/wal* files but without meta/data in the vnode, the status is reset to 0 for now
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td4288.py b/tests/pytest/functions/queryTestCases-td4288.py
new file mode 100644
index 0000000000000000000000000000000000000000..855dbd3bd8c6921fd787a137bf228f55765ab5f2
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td4288.py
@@ -0,0 +1,1587 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), logSql)
+
+    def getBuildPath(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+            if ("taosd" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/debug/build/bin")]
+                    break
+        return buildPath
+
+    def getCfgDir(self) -> str:
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+        else:
+            cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+        return cfgDir
+
+    def getCfgFile(self) -> str:
+        return self.getCfgDir()+"/taos.cfg"
+
+    def td3690(self):
+        tdLog.printNoPrefix("==========TD-3690==========")
+
+        tdSql.prepare()
+
+        tdSql.execute("show variables")
+        res_off = tdSql.cursor.fetchall()
+        resList = np.array(res_off)
+        index = np.where(resList == "offlineThreshold")
+        index_value = np.dstack((index[0])).squeeze()
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 864000)
+
+    def td4082(self):
+        tdLog.printNoPrefix("==========TD-4082==========")
+        tdSql.prepare()
+
+        cfgfile = self.getCfgFile()
+        max_compressMsgSize = 100000000
+
+        tdSql.execute("show variables")
+        res_com = tdSql.cursor.fetchall()
+        rescomlist = np.array(res_com)
+        cpms_index = np.where(rescomlist == "compressMsgSize")
+        index_value = np.dstack((cpms_index[0])).squeeze()
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 100000000)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+    def td4097(self):
+        tdLog.printNoPrefix("==========TD-4097==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("drop database if exists db1")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("create database if not exists db1 keep 3650")
+        tdSql.execute("create database if not exists new keep 3650")
+        tdSql.execute("create database if not exists private keep 3650")
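+        # besides db and db1, extra databases (db2, new, private) are created so
+        # that "show create database" can also be exercised on keyword-like
+        # names, both before and after entering a database via "use db".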
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
+            tdSql.checkData(0, 7, 36500)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db1")
+        tdSql.query("show databases")
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "3650,3650,3650")
+        else:
+            tdSql.checkData(0, 7, 3650)
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 3650)
+
+        tdSql.execute("alter database db1 keep 365")
+        tdSql.execute("drop database if exists db1")
+
+
+        pass
+
+    def td4724(self):
+        tdLog.printNoPrefix("==========TD-4724==========")
+        cfgfile = self.getCfgFile()
+        minTablesPerVnode = 5
+        maxTablesPerVnode = 10
+        maxVgroupsPerDb = 100
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+        min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+        max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        insert_sql = "insert into "
+        for i in range(100):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+        tdSql.query("show dnodes")
+        vnode_count = tdSql.getData(0, 2)
+        if vnode_count <= 1:
+            tdLog.exit("vnode count is less than 2")
+
+        tdSql.execute(insert_sql)
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+        pass
+
+    def td4889(self):
+        tdLog.printNoPrefix("==========TD-4889==========")
+        cfg = {
+            'minRowsPerFileBlock': '10',
+            'maxRowsPerFileBlock': '200',
+            'minRows': '10',
+            'maxRows': '200',
+            'maxVgroupsPerDb': '100',
+            'maxTablesPerVnode': '1200',
+        }
+        tdSql.query("show dnodes")
+        dnode_index = tdSql.getData(0,0)
+        tdDnodes.stop(dnode_index)
+        tdDnodes.deploy(dnode_index, cfg)
+        tdDnodes.start(dnode_index)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+        nowtime = int(round(time.time() * 1000))
+        for i in range(1000):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            sql = f"insert into db.t1{i} values"
+            for j in range(260):
+                sql += f"({nowtime-1000*i-j}, {i+j})"
+                # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+            tdSql.execute(sql)
+
+        # tdDnodes.stop(dnode_index)
+        # tdDnodes.start(dnode_index)
+
+        tdSql.query("show vgroups")
+        index = tdSql.getData(0,0)
+        tdSql.checkData(0, 6, 0)
+        tdSql.execute(f"compact vnodes in({index})")
+        start_time = time.time()
+        while True:
+            tdSql.query("show vgroups")
+            if tdSql.getData(0, 6) != 0:
+                tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+                break
+            run_time = time.time()-start_time
+            if run_time > 3:
+                tdLog.exit("compaction did not occur")
+            # time.sleep(0.1)
+
+        pass
+
+    def td5168insert(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+        tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+        for i in range(5):
+            c1 = 1001.11 + i*0.1
+            c2 = 1001.11 + i*0.1 + 1*0.01
+            c3 = 1001.11 + i*0.1 + 2*0.01
+            c4 = 1001.11 + i*0.1 + 3*0.01
+            tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+        # for i in range(1000000):
+        for i in range(10000):
+            random1 = random.uniform(1000,1001)
+            random2 = random.uniform(1000,1001)
+            random3 = random.uniform(1000,1001)
+            random4 = random.uniform(1000,1001)
+            tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+        pass
+
+    def td5168(self):
+        tdLog.printNoPrefix("==========TD-5168==========")
+        # insert random numbers within a small value range
+        tdLog.printNoPrefix("=====step0: insert data with default settings========")
+        self.td5168insert()
+
+        # take the values at five timestamps as the baseline; without lossy compression they must match exactly
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+            for j in range(4):
+                locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+                print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # tdSql.query("select * from db.t1 limit 100,1")
+        # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 1000,1")
+        # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 10000,1")
+        # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 100000,1")
+        # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 1000000,1")
+        # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+        # stop the service and record the data size with lossy compression disabled
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+
+        cfgdir = self.getCfgDir()
+        cfgfile = self.getCfgFile()
+
+        lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+        data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+        dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"lossyColumns disabled, data size is: {dsize_init}; the lossyColumns line is: {lossy_args}")
+
+        ###################################################
+        float_lossy = "float"
+        double_lossy = "double"
+        float_double_lossy = "float|double"
+        no_lossy = ""
+
+        double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+        _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+        lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+        lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+        lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+        lossy_no_cmd = f"sed -i '$a lossyColumns {no_lossy}' {cfgfile} "
+
+        ###################################################
+
+        # enable lossy compression for float columns, then start the service and insert data
+        tdLog.printNoPrefix("=====step1: set lossyColumns to float========")
+        lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+        tdDnodes.start(index)
+        self.td5168insert()
+
+        # query the five timestamps above and compare with the baseline values
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+            for j in range(4):
+                # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+                # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and record the data size with lossyColumns=float
+        tdDnodes.stop(index)
+        dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"lossyColumns enabled, data size is: {dsize_float}; the lossyColumns line is: {lossy_args}")
+
+        # switch lossy compression to double columns, then start the service
+        tdLog.printNoPrefix("=====step2: set lossyColumns to double========")
+        lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+        tdDnodes.start(index)
+        self.td5168insert()
+
+        # query the five timestamps above and compare with the baseline values
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            for j in range(4):
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and record the data size with lossyColumns=double
+        tdDnodes.stop(index)
+        dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"lossyColumns enabled, data size is: {dsize_double}; the lossyColumns line is: {lossy_args}")
+
+        # switch lossy compression to float&&double, then start the service
+        tdLog.printNoPrefix("=====step3: set lossyColumns to float&&double ========")
+        lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+        tdDnodes.start(index)
+        self.td5168insert()
+
+        # query the five timestamps above and compare with the baseline values
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            for j in range(4):
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and record the data size with lossyColumns=float&&double
+        tdDnodes.stop(index)
+        dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"lossyColumns enabled, data size is: {dsize_float_double}; the lossyColumns line is: {lossy_args}")
+
+        if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+            tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.exit("lossy compression did not take effect")
+        else:
+            tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.printNoPrefix("lossy compression took effect")
+
+        pass
+
+    def td5433(self):
+        tdLog.printNoPrefix("==========TD-5433==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+        numtab=20000
+        for i in range(numtab):
+            sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+        tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+        tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+        tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+        tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+        tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+        tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 != 150")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 = 150")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(numtab)
+
+        tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 != 2")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1")
+        tdSql.checkRows(128)
+
+        tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 != 200")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 = 200")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2")
+        tdSql.checkRows(5)
+
+        tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 != 2")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2")
+        tdSql.checkRows(5)
+
+        pass
+
+    def td5798(self):
+        tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+        maxRemainderNum=7
+        tbnum=101
+        for i in range(tbnum-1):
+            sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+            tdSql.execute(f"insert into db.t{i} (ts) values (now-7d)")
+
+            tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+            tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+            tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+            tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+            tdSql.execute(f"insert into db.t0{i} (ts) values (now-7d)")
+        tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+        tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+        tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+        tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+        tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+        tdSql.execute(f"insert into db.t100num (ts) values (now-7d)")
+        tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+        tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+        tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+        tdSql.execute(f"insert into db.t0100num (ts) values (now-7d)")
+
+        #========== TD-5810 support distinct on multiple data columns ==========
+        tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum)
+        tdSql.query(f"select distinct c2 from stb1")
+        tdSql.checkRows(4)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum*3)
+        tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+        tdSql.checkRows(2)
+
+        tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(1)
+        tdSql.query(f"select distinct c2 from t1")
+        tdSql.checkRows(4)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c1 from t1 ")
+        tdSql.checkRows(2)
+        tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(1)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+        tdSql.checkRows(1)
+
+        tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+        tdSql.checkRows(2)
+
+        tdSql.error("select distinct c5 from stb1")
+        tdSql.error("select distinct c5 from t1")
+        tdSql.error("select distinct c1 from db.*")
+        tdSql.error("select c2, distinct c1 from stb1")
+        tdSql.error("select c2, distinct c1 from t1")
+        tdSql.error("select distinct c2 from ")
+        tdSql.error("distinct c2 from stb1")
+        tdSql.error("distinct c2 from t1")
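+        # judging by the error checks below, distinct over data columns is
+        # limited to two columns here; three columns, another table's columns,
+        # and mixing distinct with aggregates, order by, group by, interval,
+        # fill or slimit are all expected to fail.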
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 support distinct multi-tag-column ========== + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
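# note on the k=8 cases above and below (a sketch of the data layout, not executed): each child table holds 9 rows, but the three (ts)-only inserts leave every data column NULL, and top/bottom skip NULL values, so at most 6 rows can come back even when k=8; the same layout is why derivative and diff return 5 rows, i.e. 6 non-null values yield 5 deltas +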
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm = algo:0 + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p: bin/oct/hex + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27:mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with four operation + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1' ,'condition':'order by ts'} + self.checkapert(**case42) + case43 = {'table_expr':'t1' ,'condition':'order by ts'} + self.checkapert(**case43) + + # case44: with limit offset + case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'} + self.checkapert(**case44) + case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'} + self.checkapert(**case45) + + pass + + def error_apercentile(self): + + # invalid-input tests + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm + tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm + tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm + tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm + tdSql.error("apercentile( c1, 100) from t1") # no select + tdSql.error("select apercentile from t1") # no algorithm condition + tdSql.error("select apercentile c1,0 from t1") # no brackets + tdSql.error("select apercentile (c1,0) t1") # no from + tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm + tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr + tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1 + tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2 + tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3 + tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5 + tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6 + tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7 + tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8 + tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9 + tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10 + tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11 + tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12 + tdSql.error("select apercentile(c1) from t1") # args: 1 + tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # args: 4 + tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # args: 4 + tdSql.error("select apercentile() from t1") # args: null 1 + tdSql.error("select apercentile from t1") # args: null 2 + tdSql.error("select apercentile( , , ) from t1") # args: null 3 + tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # args: null 4 + tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column + tdSql.error(self.apercentile_query_form(col=123)) # col:numerical + tdSql.error(self.apercentile_query_form(col=True)) # col:bool + tdSql.error(self.apercentile_query_form(col='')) # col:'' + tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr + tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical + tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp + tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary + tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool + tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar + tdSql.error(self.apercentile_query_form(p=True)) # p:bool + tdSql.error(self.apercentile_query_form(p='a')) # p:str + 
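# p must be a bare numeric literal in [0, 100]: bool, string, expression and timestamp forms are all rejected, as are the out-of-range and overflow values in the boundary tests below; for illustration (a sketch, not executed), self.apercentile_query_form(p='a') renders "select apercentile(c1, a, 't-digest') from t1", which must fail to parse +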
tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr + tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp + tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool + tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:bool + tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp + tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr + + # boundary test + tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100] + tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint + tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100] + tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint + + # mix function test + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with bottom function + tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function + tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function + tdSql.error(self.apercentile_query_form(alias=', *')) # mix with * + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function + tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation + + def apercentile_data(self, tbnum, data_row, basetime): + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, 
-1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def td6108(self): + tdLog.printNoPrefix("==========TD-6108==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + tbnum = 10 + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + tdLog.printNoPrefix("######## no data test:") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data test:") + nowtime = int(round(time.time() * 1000)) + per_table_rows = 1000 + self.apercentile_data(tbnum, per_table_rows, nowtime) + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data with NULL test:") + tdSql.execute(f"insert into t1(ts) values ({nowtime-5})") + tdSql.execute(f"insert into t1(ts) values ({nowtime+5})") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + self.apercentile_query() + self.error_apercentile() + + + def run(self): + + self.td4288() + + # self.td5168() + # self.td5433() + # self.td5798() + + # develop branch + # self.td4889() # in the scenario with vnode/wal/wal* files but without meta/data in the vnode, the status is reset to 0 right now + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/functions/queryTestCases-td4724.py b/tests/pytest/functions/queryTestCases-td4724.py new file mode 100644 index 0000000000000000000000000000000000000000..be3aa4be9b7811569148b6e1c3f708427e132567 --- /dev/null +++ b/tests/pytest/functions/queryTestCases-td4724.py @@ -0,0 +1,1587 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg" + else: + cfgDir = self.getBuildPath() + "/sim/dnode1/cfg" + return cfgDir + + def getCfgFile(self) -> str: + return self.getCfgDir()+"/taos.cfg" + + def td3690(self): + tdLog.printNoPrefix("==========TD-3690==========") + + tdSql.prepare() + + tdSql.execute("show variables") + res_off = tdSql.cursor.fetchall() + resList = np.array(res_off) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 864000) + + def td4082(self): + tdLog.printNoPrefix("==========TD-4082==========") + tdSql.prepare() + + cfgfile = self.getCfgFile() + max_compressMsgSize = 100000000 + + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "compressMsgSize") + index_value = np.dstack((cpms_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 100000000) + + tdDnodes.stop(index) + cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + def td4097(self): + tdLog.printNoPrefix("==========TD-4097==========") + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + 
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
tdSql.checkData(0, 7, 36500) + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db1") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.execute("alter database db1 keep 365") + tdSql.execute("drop database if exists db1") + + + pass + + def td4724(self): + tdLog.printNoPrefix("==========TD-4724==========") + cfgfile = self.getCfgFile() + minTablesPerVnode = 5 + maxTablesPerVnode = 10 + maxVgroupsPerDb = 100 + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} " + min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} " + max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} " + try: + _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + insert_sql = "insert into " + for i in range(100): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})" + tdSql.query("show dnodes") + vnode_count = tdSql.getData(0, 2) + if vnode_count <= 1: + tdLog.exit("vnode count is less than 2") + + tdSql.execute(insert_sql) + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + pass + + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + cfg = { + 'minRowsPerFileBlock': '10', + 'maxRowsPerFileBlock': '200', + 'minRows': '10', + 'maxRows': '200', + 'maxVgroupsPerDb': '100', + 'maxTablesPerVnode': '1200', + } + tdSql.query("show dnodes") + dnode_index = tdSql.getData(0,0) + tdDnodes.stop(dnode_index) + tdDnodes.deploy(dnode_index, cfg) + tdDnodes.start(dnode_index) + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + nowtime = int(round(time.time() * 1000)) + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + sql = f"insert into db.t1{i} values" + for j in range(260): + sql += f"({nowtime-1000*i-j}, {i+j})" + # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + tdSql.execute(sql) + + # tdDnodes.stop(dnode_index) + # tdDnodes.start(dnode_index) + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + start_time = time.time() + while True: + tdSql.query("show vgroups") + if tdSql.getData(0, 6) != 0: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + break + run_time = time.time()-start_time + if run_time > 3: + tdLog.exit("compaction did not occur") + # 
time.sleep(0.1) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(10000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # insert random values within a small range + tdLog.printNoPrefix("=====step0: insert data with the default config========") + self.td5168insert() + + # take the rows at five timestamps as baseline values; without lossy compression they must match exactly + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # stop the service and measure the data size with lossy compression disabled + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"lossyColumns disabled, data size is: {dsize_init}; the lossyColumns line is: {lossy_args}") + + ################################################### + float_lossy = "float" + double_lossy = 
"double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns 
value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is disabled, data size is: {dsize_init}") + tdLog.exit("lossy compression did not take effect") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is disabled, data size is: {dsize_init}") + tdLog.printNoPrefix("lossy compression took effect") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=20000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts 
timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts) values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts) values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts) values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts) values (now-7d)")
+
+ #========== TD-5810: support distinct on multiple data columns ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ 
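# Why the row counts above hold (inferred from the data setup at the top of this case):
+ # each c1 value i is inserted with the three c2 values i%3, (i-1)%3 and (i-2)%3, i.e.
+ # the full set {0,1,2}, so "distinct c1,c2" yields tbnum*3 pairs while "distinct c1"
+ # collapses back to tbnum rows; "distinct c2" returns 4 rows because the (ts)-only
+ # inserts contribute a NULL that counts as its own distinct value.
+ 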
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+ #========== TD-5798: support distinct on multiple tag columns ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should report an error ##########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
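# t1 holds 9 rows at this point, but three of them were inserted with only a ts
+ # value, so their other columns are NULL; top/bottom skip NULL values, which is
+ # why asking for 8 values can only ever return the 6 non-NULL rows checked above.
+ 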
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm = algo:0 + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p: bin/oct/hex + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27:mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with four operation + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1', 'condition':'order by ts'}
+ self.checkapert(**case42)
+ case43 = {'table_expr':'t1', 'condition':'order by ts'}
+ self.checkapert(**case43)
+
+ # case44: with limit offset
+ case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+ self.checkapert(**case44)
+ case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+ self.checkapert(**case45)
+
+ pass
+
+ def error_apercentile(self):
+
+ # abnormal input tests
+ #
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+ #
+ # form test
+ tdSql.error(self.apercentile_query_form(col="",com='',algo=''))  # no col, no algorithm
+ tdSql.error(self.apercentile_query_form(col=""))  # no col, with algorithm
+ tdSql.error(self.apercentile_query_form(p='',com='',algo=''))  # no p, no algorithm
+ tdSql.error(self.apercentile_query_form(p=''))  # no p, with algorithm
+ tdSql.error("apercentile( c1, 100) from t1")  # no select
+ tdSql.error("select apercentile from t1")  # no algorithm condition
+ tdSql.error("select apercentile c1,0 from t1")  # no brackets
+ tdSql.error("select apercentile (c1,0) t1")  # no from
+ tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p, no algorithm
+ tdSql.error("select apercentile( (c1,0) ) from t1")  # no table_expr
+ tdSql.error("select apercentile{ (c1,0) } from t1")  # sql form error 1
+ tdSql.error("select apercentile[ (c1,0) ] from t1")  # sql form error 2
+ tdSql.error("select [apercentile(c1,0) ] from t1")  # sql form error 3
+ tdSql.error("select apercentile((c1, 0), 'default') from t1")  # sql form error 5
+ tdSql.error("select apercentile(c1, (0, 'default')) from t1")  # sql form error 6
+ tdSql.error("select apercentile(c1, (0), 1) from t1")  # sql form error 7
+ tdSql.error("select apercentile([c1, 0], 'default') from t1")  # sql form error 8
+ tdSql.error("select apercentile(c1, [0, 'default']) from t1")  # sql form error 9
+ tdSql.error("select apercentile(c1, {0, 'default'}) from t1")  # sql form error 10
+ tdSql.error("select apercentile([c1, 0]) from t1")  # sql form error 11
+ tdSql.error("select apercentile({c1, 0}) from t1")  # sql form error 12
+ tdSql.error("select apercentile(c1) from t1")  # args: 1
+ tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")  # args: 4
+ tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")  # args: 4
+ tdSql.error("select apercentile() from t1")  # args: null 1
+ tdSql.error("select apercentile from t1")  # args: null 2
+ tdSql.error("select apercentile( , , ) from t1")  # args: null 3
+ tdSql.error(self.apercentile_query_form(col='', p='', algo=''))  # args: null 4
+ tdSql.error(self.apercentile_query_form(col="st1"))  # col: tag column
+ tdSql.error(self.apercentile_query_form(col=123))  # col: numerical
+ tdSql.error(self.apercentile_query_form(col=True))  # col: bool
+ tdSql.error(self.apercentile_query_form(col=''))  # col: ''
+ tdSql.error(self.apercentile_query_form(col="last(c1)"))  # col: expr
+ tdSql.error(self.apercentile_query_form(col="t%"))  # col: non-numerical
+ tdSql.error(self.apercentile_query_form(col="c3"))  # col-type: timestamp
+ tdSql.error(self.apercentile_query_form(col="c4"))  # col-type: binary
+ tdSql.error(self.apercentile_query_form(col="c6"))  # col-type: bool
+ tdSql.error(self.apercentile_query_form(col="c10"))  # col-type: nchar
+ tdSql.error(self.apercentile_query_form(p=True))  # p: bool
+ tdSql.error(self.apercentile_query_form(p='a'))  # p: str
+ tdSql.error(self.apercentile_query_form(p='last(*)'))  # p: expr
+ tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))  # p: timestamp
+ tdSql.error(self.apercentile_query_form(algo='t-digest'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo='"t_digest"'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest."'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo='tdigest'))  # algorithm: str
+ tdSql.error(self.apercentile_query_form(algo=2.0))  # algorithm: float
+ tdSql.error(self.apercentile_query_form(algo=1.9999))  # algorithm: float
+ tdSql.error(self.apercentile_query_form(algo=-0.9999))  # algorithm: float
+ tdSql.error(self.apercentile_query_form(algo=-1.0))  # algorithm: float
+ tdSql.error(self.apercentile_query_form(algo=0b1))  # algorithm: int (binary literal)
+ tdSql.error(self.apercentile_query_form(algo=0x1))  # algorithm: int (hex literal)
+ tdSql.error(self.apercentile_query_form(algo=0o1))  # algorithm: int (octal literal)
+ tdSql.error(self.apercentile_query_form(algo=True))  # algorithm: bool
+ tdSql.error(self.apercentile_query_form(algo="True"))  # algorithm: str "True"
+ tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000'))  # algorithm: timestamp
+ tdSql.error(self.apercentile_query_form(algo='last(c1)'))  # algorithm: expr
+
+ # boundary test
+ tdSql.error(self.apercentile_query_form(p=-1))  # p left out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=-9223372036854775809))  # p below the bigint range
+ tdSql.error(self.apercentile_query_form(p=100.1))  # p right out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=18446744073709551616))  # p above the unsigned-bigint range
+ tdSql.error(self.apercentile_query_form(algo=-1))  # algorithm left out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=-9223372036854775809))  # algorithm below the bigint range
+ tdSql.error(self.apercentile_query_form(algo=2))  # algorithm right out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=18446744073709551616))  # algorithm above the unsigned-bigint range
+
+ # mix function test
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))  # mix with top function
+ tdSql.error(self.apercentile_query_form(alias=', bottom(c1,1)'))  # mix with bottom function
+ tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))  # mix with last_row function
+ tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))  # mix with distinct
+ tdSql.error(self.apercentile_query_form(alias=', *'))  # mix with *
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))  # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"'))  # mix with interp function
+ tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)'))  # mix with derivative function
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))  # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias='+ c1)'))  # mix with an arithmetic expression
+
+ def apercentile_data(self, tbnum, data_row, basetime):
+ for i in range(tbnum):
+ for j in range(data_row):
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+ f"'binary_{j}', {random.uniform(-200, 
-1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+ f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+ )
+
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+ f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+ f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+ )
+ tdSql.execute(
+ f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ )
+
+ pass
+
+ def td6108(self):
+ tdLog.printNoPrefix("==========TD-6108==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create stable db.stb1 (\
+ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+ ) \
+ tags(st1 int)"
+ )
+ tdSql.execute(
+ "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ )
+ tbnum = 10
+ for i in range(tbnum):
+ tdSql.execute(f"create table t{i} using stb1 tags({i})")
+ tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+ tdLog.printNoPrefix("######## no data test:")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data test:")
+ nowtime = int(round(time.time() * 1000))
+ per_table_rows = 1000
+ self.apercentile_data(tbnum, per_table_rows, nowtime)
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data with NULL test:")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## check after WAL test:")
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+
+ self.apercentile_query()
+ self.error_apercentile()
+
+
+ def run(self):
+
+ self.td4724()
+
+ # self.td5168()
+ # self.td5433()
+ # self.td5798()
+
+ # develop branch
+ # self.td4889()  # in a scenario with vnode/wal/wal* files but no meta/data in the vnode, the status is currently reset to 0
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+ tdCases.addWindows(__file__, TDTestCase())
+ tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td5790.py b/tests/pytest/functions/queryTestCases-td5790.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d7cfe68adc913e53bff098a446350325b0325ab
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td5790.py
@@ -0,0 +1,1588 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg" + else: + cfgDir = self.getBuildPath() + "/sim/dnode1/cfg" + return cfgDir + + def getCfgFile(self) -> str: + return self.getCfgDir()+"/taos.cfg" + + def td3690(self): + tdLog.printNoPrefix("==========TD-3690==========") + + tdSql.prepare() + + tdSql.execute("show variables") + res_off = tdSql.cursor.fetchall() + resList = np.array(res_off) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 864000) + + def td4082(self): + tdLog.printNoPrefix("==========TD-4082==========") + tdSql.prepare() + + cfgfile = self.getCfgFile() + max_compressMsgSize = 100000000 + + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "compressMsgSize") + index_value = np.dstack((cpms_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 100000000) + + tdDnodes.stop(index) + cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + def td4097(self): + tdLog.printNoPrefix("==========TD-4097==========") + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + 
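# Besides db and db1 (which get stables and child tables below), the extra
+ # databases new, private and db2 stay empty; presumably they exist to check that
+ # keyword-like names still work with the "show create database" statements below.
+ 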
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
tdSql.checkData(0, 7, 36500)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode count is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compaction did not occur")
+ # 
time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # insert random values within a small range
+ tdLog.printNoPrefix("=====step0: insert data with the default configuration========")
+ self.td5168insert()
+
+ # take the data at five timestamps as baseline values; without compression they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # stop the service and measure the data size with compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"lossyColumns disabled, data size is: {dsize_init}; the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = 
"double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns 
value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.exit("lossy compression did not take effect") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.printNoPrefix("lossy compression took effect") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=20000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts 
timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + + #========== TD-5810 support distinct multi-data-column ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + 
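# --- editor's sketch (not part of the patch): where the distinct row counts
# asserted above come from. Each table t{i} stores c1 = i with c2 taking the
# three residues i%3, (i-1)%3, (i-2)%3, and t100num adds c1 = 100 with the same
# three c2 values, so "select distinct c1,c2 ... where c1 < tbnum" should yield
# tbnum*3 pairs. A standalone recomputation, assuming tbnum = 101 as above:
def expected_distinct_pairs(tbnum=101):
    pairs = {(i, (i - k) % 3) for i in range(tbnum) for k in range(3)}
    return len(pairs)  # 303 == tbnum * 3, matching checkRows(tbnum*3)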
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 support distinct multi-tag-column ========== + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
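# --- editor's note (illustrative sketch, not part of the patch): the row counts
# checked above follow from null handling. Each table holds six valued rows plus
# three timestamp-only (null) rows, so bottom(c, 8)/top(c, 8) can return at most
# the six non-null values, and diff()/derivative() emit one row per adjacent
# pair of non-null values, i.e. five rows. A pure-Python model of that behavior:
def expected_topbottom_rows(values, k):
    non_null = [v for v in values if v is not None]
    return min(k, len(non_null))        # top/bottom: capped by the non-null count

def expected_diff_rows(values):
    non_null = [v for v in values if v is not None]
    return max(len(non_null) - 1, 0)    # diff/derivative: one row per neighbor pair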
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm argument (equivalent to algo: 0) + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p as a binary/hex literal; algorithm name is case-insensitive + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27: mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with arithmetic operators + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1', 'condition':'order by ts'} + self.checkapert(**case42) + case43 = {'table_expr':'t1', 'condition':'order by ts'} + self.checkapert(**case43) + + # case44: with limit offset + case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'} + self.checkapert(**case44) + case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'} + self.checkapert(**case45) + + pass + + def error_apercentile(self): + + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm + tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm + tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm + tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm + tdSql.error("apercentile( c1, 100) from t1") # no select + tdSql.error("select apercentile from t1") # no algorithm condition + tdSql.error("select apercentile c1,0 from t1") # no brackets + tdSql.error("select apercentile (c1,0) t1") # no from + tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm + tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr + tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1 + tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2 + tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3 + tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5 + tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6 + tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7 + tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8 + tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9 + tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10 + tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11 + tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12 + tdSql.error("select apercentile(c1) from t1") # args: 1 + tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # args: 4 + tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # args: 4 + tdSql.error("select apercentile() from t1") # args: null 1 + tdSql.error("select apercentile from t1") # args: null 2 + tdSql.error("select apercentile( , , ) from t1") # args: null 3 + tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # args: null 4 + tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column + tdSql.error(self.apercentile_query_form(col=123)) # col:numerical + tdSql.error(self.apercentile_query_form(col=True)) # col:bool + tdSql.error(self.apercentile_query_form(col='')) # col:'' + tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr + tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical + tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp + tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary + tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool + tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar + tdSql.error(self.apercentile_query_form(p=True)) # p:bool + tdSql.error(self.apercentile_query_form(p='a')) # p:str + 
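# --- editor's sketch (not part of the patch): a few of the statements that the
# apercentile_query_form builder above composes for these cases, shown with a
# standalone copy of the same f-string (default values as defined earlier):
def apercentile_query_form(col="c1", p=0, com=',', algo="'t-digest'",
                           alias="", table_expr="t1", condition=""):
    return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"

print(apercentile_query_form())                       # select apercentile(c1, 0, 't-digest')  from t1
print(apercentile_query_form(com='', algo=''))        # select apercentile(c1, 0 )  from t1
print(apercentile_query_form(p='', com='', algo=''))  # select apercentile(c1,  )  from t1 -> malformed, rejected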
tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr + tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp + tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm: int (binary literal) + tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm: int (hex literal) + tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm: int (octal literal) + tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool + tdSql.error(self.apercentile_query_form(algo="True")) # algorithm: bool-like string + tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp + tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr + + # boundary test + tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100] + tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint + tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100] + tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of bigint + tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint + + # mix function test + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function + tdSql.error(self.apercentile_query_form(alias=', bottom(c1,1)')) # mix with bottom function + tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function + tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function + tdSql.error(self.apercentile_query_form(alias=', *')) # mix with * + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function + tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with arithmetic operators + + def apercentile_data(self, tbnum, data_row, basetime): + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, 
-1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def td6108(self): + tdLog.printNoPrefix("==========TD-6108==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + tbnum = 10 + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + tdLog.printNoPrefix("######## no data test:") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data test:") + nowtime = int(round(time.time() * 1000)) + per_table_rows = 1000 + self.apercentile_data(tbnum, per_table_rows, nowtime) + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data with NULL test:") + tdSql.execute(f"insert into t1(ts) values ({nowtime-5})") + tdSql.execute(f"insert into t1(ts) values ({nowtime+5})") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + self.apercentile_query() + self.error_apercentile() + + + def run(self): + + # master branch + + # self.td5168() + # self.td5433() + # self.td5798() + + # develop branch + # self.td4889() # in the scenario where vnode/wal/wal* files exist but the vnode has no meta/data, the status is reset to 0 for now + self.td5798() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/functions/queryTestCases-td5935.py b/tests/pytest/functions/queryTestCases-td5935.py new file mode 100644 index 0000000000000000000000000000000000000000..b3e925a400f6b0850753697b9766e14f6c0faac8 --- /dev/null +++ b/tests/pytest/functions/queryTestCases-td5935.py @@ -0,0 +1,1587 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg" + else: + cfgDir = self.getBuildPath() + "/sim/dnode1/cfg" + return cfgDir + + def getCfgFile(self) -> str: + return self.getCfgDir()+"/taos.cfg" + + def td3690(self): + tdLog.printNoPrefix("==========TD-3690==========") + + tdSql.prepare() + + tdSql.execute("show variables") + res_off = tdSql.cursor.fetchall() + resList = np.array(res_off) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 864000) + + def td4082(self): + tdLog.printNoPrefix("==========TD-4082==========") + tdSql.prepare() + + cfgfile = self.getCfgFile() + max_compressMsgSize = 100000000 + + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "compressMsgSize") + index_value = np.dstack((cpms_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 100000000) + + tdDnodes.stop(index) + cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + def td4097(self): + tdLog.printNoPrefix("==========TD-4097==========") + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + 
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
tdSql.checkData(0, 7, 36500) + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db1") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.execute("alter database db1 keep 365") + tdSql.execute("drop database if exists db1") + + + pass + + def td4724(self): + tdLog.printNoPrefix("==========TD-4724==========") + cfgfile = self.getCfgFile() + minTablesPerVnode = 5 + maxTablesPerVnode = 10 + maxVgroupsPerDb = 100 + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} " + min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} " + max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} " + try: + _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + insert_sql = "insert into " + for i in range(100): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})" + tdSql.query("show dnodes") + vnode_count = tdSql.getData(0, 2) + if vnode_count <= 1: + tdLog.exit("vnode is less than 2") + + tdSql.execute(insert_sql) + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + pass + + def td4889(self): + tdLog.printNoPrefix("==========TD-4889==========") + cfg = { + 'minRowsPerFileBlock': '10', + 'maxRowsPerFileBlock': '200', + 'minRows': '10', + 'maxRows': '200', + 'maxVgroupsPerDb': '100', + 'maxTablesPerVnode': '1200', + } + tdSql.query("show dnodes") + dnode_index = tdSql.getData(0,0) + tdDnodes.stop(dnode_index) + tdDnodes.deploy(dnode_index, cfg) + tdDnodes.start(dnode_index) + + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + + nowtime = int(round(time.time() * 1000)) + for i in range(1000): + tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})") + sql = f"insert into db.t1{i} values" + for j in range(260): + sql += f"({nowtime-1000*i-j}, {i+j})" + # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})") + tdSql.execute(sql) + + # tdDnodes.stop(dnode_index) + # tdDnodes.start(dnode_index) + + tdSql.query("show vgroups") + index = tdSql.getData(0,0) + tdSql.checkData(0, 6, 0) + tdSql.execute(f"compact vnodes in({index})") + start_time = time.time() + while True: + tdSql.query("show vgroups") + if tdSql.getData(0, 6) != 0: + tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1") + break + run_time = time.time()-start_time + if run_time > 3: + tdLog.exit("compacting not occured") + # 
time.sleep(0.1) + + pass + + def td5168insert(self): + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)") + tdSql.execute("create table db.t1 using db.stb1 tags(1)") + + for i in range(5): + c1 = 1001.11 + i*0.1 + c2 = 1001.11 + i*0.1 + 1*0.01 + c3 = 1001.11 + i*0.1 + 2*0.01 + c4 = 1001.11 + i*0.1 + 3*0.01 + tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})") + + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)") + # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)") + + # for i in range(1000000): + for i in range(10000): + random1 = random.uniform(1000,1001) + random2 = random.uniform(1000,1001) + random3 = random.uniform(1000,1001) + random4 = random.uniform(1000,1001) + tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})") + + pass + + def td5168(self): + tdLog.printNoPrefix("==========TD-5168==========") + # 插入小范围内的随机数 + tdLog.printNoPrefix("=====step0: 默认情况下插入数据========") + self.td5168insert() + + # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # tdSql.query("select * from db.t1 limit 100,1") + # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000,1") + # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 10000,1") + # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 100000,1") + # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + # + # tdSql.query("select * from db.t1 limit 1000000,1") + # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4) + + # 关闭服务并获取未开启压缩情况下的数据容量 + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + + cfgdir = self.getCfgDir() + cfgfile = self.getCfgFile() + + lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'" + data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'" + dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}") + + ################################################### + float_lossy = "float" + double_lossy = 
"double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns 
value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.exit("压缩未生效") + else: + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}") + tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}") + tdLog.printNoPrefix("压缩生效") + + pass + + def td5433(self): + tdLog.printNoPrefix("==========TD-5433==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))") + numtab=20000 + for i in range(numtab): + sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})") + + tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')") + tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')") + tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')") + tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')") + tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')") + + tdSql.query("select distinct t1 from stb1 where t1 != '150'") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 != 150") + tdSql.checkRows(numtab-1) + tdSql.query("select distinct t1 from stb1 where t1 = 150") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1 where t1 = '150'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(numtab) + + tdSql.query("select distinct t0 from stb1 where t0 != '2'") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 != 2") + tdSql.checkRows(127) + tdSql.query("select distinct t0 from stb1 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb1") + tdSql.checkRows(128) + + tdSql.query("select distinct t1 from stb2 where t1 != '200'") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 != 200") + tdSql.checkRows(4) + tdSql.query("select distinct t1 from stb2 where t1 = 200") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2 where t1 = '200'") + tdSql.checkRows(1) + tdSql.query("select distinct t1 from stb2") + tdSql.checkRows(5) + + tdSql.query("select distinct t0 from stb2 where t0 != '2'") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 != 2") + tdSql.checkRows(4) + tdSql.query("select distinct t0 from stb2 where t0 = 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2 where t0 = '2'") + tdSql.checkRows(1) + tdSql.query("select distinct t0 from stb2") + tdSql.checkRows(5) + + pass + + def td5798(self): + tdLog.printNoPrefix("==========TD-5798 + TD-5810==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts 
timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)") + maxRemainderNum=7 + tbnum=101 + for i in range(tbnum-1): + sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})") + tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})") + tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})") + tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)") + + tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})") + tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')") + tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')") + tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)") + tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)") + tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)") + tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t100num (ts )values (now-7d)") + tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)") + tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)") + tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)") + tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)") + + #========== TD-5810 suport distinct multi-data-coloumn ========== + tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c2 from stb1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}") + tdSql.checkRows(2) + + tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c2 from t1") + tdSql.checkRows(4) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c1 from t1 ") + tdSql.checkRows(2) + tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}") + tdSql.checkRows(1) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2") + tdSql.checkRows(1) + + tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2") + tdSql.checkRows(2) + + tdSql.error("select distinct c5 from stb1") + tdSql.error("select distinct c5 from t1") + tdSql.error("select distinct c1 from db.*") + tdSql.error("select c2, distinct c1 from stb1") + tdSql.error("select c2, distinct c1 from t1") + tdSql.error("select distinct c2 from ") + tdSql.error("distinct c2 from stb1") + tdSql.error("distinct c2 from t1") + 
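# expected failures below: distinct over three data columns, column lists that span tables, and distinct combined with order by / group by / aggregates / interval / slimit + 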
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1) + tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ") + + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)") + # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)") + + + + #========== TD-5798 support distinct on multiple tag columns ========== + tdSql.query("select distinct t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0, t1, t2 from stb1") + tdSql.checkRows(maxRemainderNum*2+1) + tdSql.query("select distinct t0 t1, t1 t2 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t0, t0 from stb1") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t0, t1 from t1") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from t100num") + tdSql.checkRows(1) + + tdSql.query("select distinct t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t4, t2 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2, t3, t4 from stb2") + tdSql.checkRows(maxRemainderNum*3+1) + tdSql.query("select distinct t2 t1, t3 t2 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t3, t3, t3 from stb2") + tdSql.checkRows(maxRemainderNum+1) + tdSql.query("select distinct t2, t3 from t01") + tdSql.checkRows(1) + tdSql.query("select distinct t3, t4 from t0100num") + tdSql.checkRows(1) + + + ########## should be error ######### + tdSql.error("select distinct from stb1") + tdSql.error("select distinct t3 from stb1") + tdSql.error("select distinct t1 from db.*") + tdSql.error("select distinct t2 from ") + tdSql.error("distinct t2 from stb1") + tdSql.error("select distinct stb1") + tdSql.error("select distinct t0, t1, t2, t3 from stb1") + tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1") + + tdSql.error("select dist t0 from stb1") + tdSql.error("select distinct stb2.t2, stb2.t3 from stb1") + tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1") + + tdSql.error("select distinct t0, t1 from t1 where t0 < 7") + + ########## add where condition ########## + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3") + tdSql.checkRows(3) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2") + tdSql.checkRows(2) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2") + tdSql.checkRows(1) + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2") + tdSql.checkRows(3) + tdSql.error("select distinct t0, t1 from stb1 where c1 > 2") + tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5") + tdSql.checkRows(1) + tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4") + tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0") + tdSql.error("select distinct t0, t1 from stb1 where t1 
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify that the bug "function stddev with interval returns 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify that the bug "when the start row is null, fill(next) returns 0" is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
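# bottom()/top() only accept numeric data columns: the timestamp (c3), binary (c4) and bool (c6) cases below must fail, as must tag columns and ordering by the function alias + 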
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm = algo:0 + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p: bin/oct/hex + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27:mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with four operation + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1' ,'condition':'order by ts'} + self.checkapert(**case42) + case43 = {'table_expr':'t1' ,'condition':'order by ts'} + self.checkapert(**case43) + + # case44: with limit offset + case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'} + self.checkapert(**case44) + case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'} + self.checkapert(**case45) + + pass + + def error_apercentile(self): + + # unusual test + # + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + # + # form test + tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm + tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm + tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm + tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm + tdSql.error("apercentile( c1, 100) from t1") # no select + tdSql.error("select apercentile from t1") # no algorithm condition + tdSql.error("select apercentile c1,0 from t1") # no brackets + tdSql.error("select apercentile (c1,0) t1") # no from + tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm + tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr + tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1 + tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2 + tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3 + tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5 + tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6 + tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7 + tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8 + tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9 + tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10 + tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11 + tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12 + tdSql.error("select apercentile(c1) from t1") # args: 1 + tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # args: 4 + tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # args: 4 + tdSql.error("select apercentile() from t1") # args: null 1 + tdSql.error("select apercentile from t1") # args: null 2 + tdSql.error("select apercentile( , , ) from t1") # args: null 3 + tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # args: null 4 + tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column + tdSql.error(self.apercentile_query_form(col=123)) # col:numerical + tdSql.error(self.apercentile_query_form(col=True)) # col:bool + tdSql.error(self.apercentile_query_form(col='')) # col:'' + tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr + tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical + tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp + tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary + tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool + tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar + tdSql.error(self.apercentile_query_form(p=True)) # p:bool + tdSql.error(self.apercentile_query_form(p='a')) # p:str + 
tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr + tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp + tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm:int (binary literal) + tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm:int (hex literal) + tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm:int (octal literal) + tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool + tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:str "True" + tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp + tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr + + # boundary test + tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100] + tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint + tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100] + tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint + + # mix function test + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function + tdSql.error(self.apercentile_query_form(alias=', bottom(c1,1)')) # mix with bottom function + tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function + tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function + tdSql.error(self.apercentile_query_form(alias=', *')) # mix with * + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function + tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation + + def apercentile_data(self, tbnum, data_row, basetime): + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, 
-1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, " + f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )" + ) + + tdSql.execute( + f"insert into t{i} values (" + f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, " + f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, " + f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )" + ) + tdSql.execute( + f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )" + ) + + pass + + def td6108(self): + tdLog.printNoPrefix("==========TD-6108==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute( + "create stable db.stb1 (\ + ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \ + c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\ + ) \ + tags(st1 int)" + ) + tdSql.execute( + "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)" + ) + tbnum = 10 + for i in range(tbnum): + tdSql.execute(f"create table t{i} using stb1 tags({i})") + tdSql.execute(f"create table tt{i} using stb2 tags({i})") + + tdLog.printNoPrefix("######## no data test:") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data test:") + nowtime = int(round(time.time() * 1000)) + per_table_rows = 1000 + self.apercentile_data(tbnum, per_table_rows, nowtime) + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## insert data with NULL test:") + tdSql.execute(f"insert into t1(ts) values ({nowtime-5})") + tdSql.execute(f"insert into t1(ts) values ({nowtime+5})") + self.apercentile_query() + self.error_apercentile() + + tdLog.printNoPrefix("######## check after WAL test:") + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + self.apercentile_query() + self.error_apercentile() + + + def run(self): + + self.td5935() + + # self.td5168() + # self.td5433() + # self.td5798() + + # develop branch + # self.td4889() # in the scenario where vnode/wal/wal* files exist but the vnode has no meta/data, the status is currently reset to 0 + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) + + + diff --git a/tests/pytest/functions/queryTestCases-td6068.py b/tests/pytest/functions/queryTestCases-td6068.py new file mode 100644 index 0000000000000000000000000000000000000000..1c3ffd998df391dd7f870fc73fcad01f484a914a --- /dev/null +++ b/tests/pytest/functions/queryTestCases-td6068.py @@ -0,0 +1,1588 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import subprocess +import random +import math +import numpy as np +import inspect + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self) -> str: + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg" + else: + cfgDir = self.getBuildPath() + "/sim/dnode1/cfg" + return cfgDir + + def getCfgFile(self) -> str: + return self.getCfgDir()+"/taos.cfg" + + def td3690(self): + tdLog.printNoPrefix("==========TD-3690==========") + + tdSql.prepare() + + tdSql.execute("show variables") + res_off = tdSql.cursor.fetchall() + resList = np.array(res_off) + index = np.where(resList == "offlineThreshold") + index_value = np.dstack((index[0])).squeeze() + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 864000) + + def td4082(self): + tdLog.printNoPrefix("==========TD-4082==========") + tdSql.prepare() + + cfgfile = self.getCfgFile() + max_compressMsgSize = 100000000 + + tdSql.execute("show variables") + res_com = tdSql.cursor.fetchall() + rescomlist = np.array(res_com) + cpms_index = np.where(rescomlist == "compressMsgSize") + index_value = np.dstack((cpms_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + + tdDnodes.stop(index) + cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 100000000) + + tdDnodes.stop(index) + cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} " + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + tdSql.query("show variables") + tdSql.checkData(index_value, 1, -1) + + tdDnodes.stop(index) + cmd = f"sed -i '$d' {cfgfile}" + try: + _ = subprocess.check_output(cmd, shell=True).decode("utf-8") + except Exception as e: + raise e + + tdDnodes.start(index) + + def td4097(self): + tdLog.printNoPrefix("==========TD-4097==========") + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("create database if not exists db1 keep 3650") + tdSql.execute("create database if not exists new keep 3650") + tdSql.execute("create database if not exists private keep 3650") + 
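# keyword-like database names ("new", "private") are created deliberately so the show create database checks below also cover them + 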
tdSql.execute("create database if not exists db2 keep 3650") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)") + tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)") + + tdSql.execute("create table db.t10 using db.stb1 tags(1)") + tdSql.execute("create table db.t11 using db.stb1 tags(2)") + tdSql.execute("create table db.t20 using db.stb2 tags(3)") + tdSql.execute("create table db1.t30 using db1.stb3 tags(4)") + + # tdLog.printNoPrefix("==========TD-4097==========") + # 插入数据,然后进行show create 操作 + + # p1 不进入指定数据库 + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.query("show create database db2") + tdSql.checkRows(1) + tdSql.query("show create database new") + tdSql.checkRows(1) + tdSql.query("show create database private") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stable stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable db.stb1, db.stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.error("show create table db.stb0") + tdSql.error("show create table stb1") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.stb1, db.stb2") + + # p2 进入指定数据库 + tdSql.execute("use db") + + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create database db1") + tdSql.checkRows(1) + tdSql.error("show create database ") + tdSql.error("show create databases db ") + tdSql.error("show create database db.stb1") + tdSql.error("show create database db0") + tdSql.error("show create database db db1") + tdSql.error("show create database db, db1") + tdSql.error("show create database stb1") + tdSql.error("show create database * ") + + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create stable db1.stb3") + tdSql.checkRows(1) + tdSql.error("show create stable db.t10") + tdSql.error("show create stable db") + tdSql.error("show create stable t10") + tdSql.error("show create stable db.stb0") + tdSql.error("show create stables stb1") + tdSql.error("show create stable ") + tdSql.error("show create stable *") + tdSql.error("show create stable db.stb1 db.stb2") + tdSql.error("show create stable stb1 stb2") + tdSql.error("show create stable db.stb1, db.stb2") + tdSql.error("show create stable stb1, stb2") + + tdSql.query("show create table db.stb1") + tdSql.checkRows(1) + tdSql.query("show create table stb1") + tdSql.checkRows(1) + tdSql.query("show create table db.t10") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + tdSql.query("show create table 
db1.t30") + tdSql.checkRows(1) + tdSql.error("show create table t30") + tdSql.error("show create table db.stb0") + tdSql.error("show create table db.t0") + tdSql.error("show create table db") + tdSql.error("show create tables stb1") + tdSql.error("show create tables t10") + tdSql.error("show create table ") + tdSql.error("show create table *") + tdSql.error("show create table db.stb1 db.stb2") + tdSql.error("show create table db.t11 db.t10") + tdSql.error("show create table db.stb1, db.stb2") + tdSql.error("show create table db.t11, db.t10") + tdSql.error("show create table stb1 stb2") + tdSql.error("show create table t11 t10") + tdSql.error("show create table stb1, stb2") + tdSql.error("show create table t11, t10") + + # p3 删库删表后进行查询 + tdSql.execute("drop table if exists t11") + + tdSql.error("show create table t11") + tdSql.error("show create table db.t11") + tdSql.query("show create stable stb1") + tdSql.checkRows(1) + tdSql.query("show create table t10") + tdSql.checkRows(1) + + tdSql.execute("drop stable if exists stb2") + + tdSql.error("show create table stb2") + tdSql.error("show create table db.stb2") + tdSql.error("show create stable stb2") + tdSql.error("show create stable db.stb2") + tdSql.error("show create stable db.t20") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db1") + tdSql.error("show create database db1") + tdSql.error("show create stable db1.t31") + tdSql.error("show create stable db1.stb3") + tdSql.query("show create database db") + tdSql.checkRows(1) + tdSql.query("show create stable db.stb1") + tdSql.checkRows(1) + + tdSql.execute("drop database if exists db") + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists new") + tdSql.execute("drop database if exists db2") + tdSql.execute("drop database if exists private") + + def td4153(self): + tdLog.printNoPrefix("==========TD-4153==========") + + pass + + def td4288(self): + tdLog.printNoPrefix("==========TD-4288==========") + # keep ~ [days,365000] + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + + tdSql.execute("show variables") + res_kp = tdSql.cursor.fetchall() + resList = np.array(res_kp) + keep_index = np.where(resList == "keep") + index_value = np.dstack((keep_index[0])).squeeze() + + tdSql.query("show variables") + tdSql.checkData(index_value, 1, 3650) + + tdSql.query("show databases") + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + tdSql.checkData(0, 7, "3650,3650,3650") + else: + tdSql.checkData(0, 7, 3650) + + days = tdSql.getData(0, 6) + tdSql.error("alter database db keep 3650001") + tdSql.error("alter database db keep 9") + tdSql.error("alter database db keep 0b") + tdSql.error("alter database db keep 3650,9,36500") + tdSql.error("alter database db keep 3650,3650,365001") + tdSql.error("alter database db keep 36500,a,36500") + tdSql.error("alter database db keep (36500,3650,3650)") + tdSql.error("alter database db keep [36500,3650,36500]") + tdSql.error("alter database db keep 36500,0xff,3650") + tdSql.error("alter database db keep 36500,0o365,3650") + tdSql.error("alter database db keep 36500,0A3Ch,3650") + tdSql.error("alter database db keep") + tdSql.error("alter database db keep0 36500") + + tdSql.execute("alter database db keep 36500") + tdSql.query("show databases") + if ("community" in selfPath): + tdSql.checkData(0, 7, "36500,36500,36500") + else: + 
+            tdSql.checkData(0, 7, 36500)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db1")
+        tdSql.query("show databases")
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "3650,3650,3650")
+        else:
+            tdSql.checkData(0, 7, 3650)
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 3650)
+
+        tdSql.execute("alter database db1 keep 365")
+        tdSql.execute("drop database if exists db1")
+
+
+        pass
+
+    def td4724(self):
+        tdLog.printNoPrefix("==========TD-4724==========")
+        cfgfile = self.getCfgFile()
+        minTablesPerVnode = 5
+        maxTablesPerVnode = 10
+        maxVgroupsPerDb = 100
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        tdDnodes.stop(index)
+        vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+        min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+        max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        insert_sql = "insert into "
+        for i in range(100):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+        tdSql.query("show dnodes")
+        vnode_count = tdSql.getData(0, 2)
+        if vnode_count <= 1:
+            tdLog.exit("vnode count is less than 2")
+
+        tdSql.execute(insert_sql)
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+        pass
+
+    def td4889(self):
+        tdLog.printNoPrefix("==========TD-4889==========")
+        cfg = {
+            'minRowsPerFileBlock': '10',
+            'maxRowsPerFileBlock': '200',
+            'minRows': '10',
+            'maxRows': '200',
+            'maxVgroupsPerDb': '100',
+            'maxTablesPerVnode': '1200',
+        }
+        tdSql.query("show dnodes")
+        dnode_index = tdSql.getData(0,0)
+        tdDnodes.stop(dnode_index)
+        tdDnodes.deploy(dnode_index, cfg)
+        tdDnodes.start(dnode_index)
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+        nowtime = int(round(time.time() * 1000))
+        for i in range(1000):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            sql = f"insert into db.t1{i} values"
+            for j in range(260):
+                sql += f"({nowtime-1000*i-j}, {i+j})"
+            # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+            tdSql.execute(sql)
+
+        # tdDnodes.stop(dnode_index)
+        # tdDnodes.start(dnode_index)
+
+        tdSql.query("show vgroups")
+        index = tdSql.getData(0,0)
+        tdSql.checkData(0, 6, 0)
+        tdSql.execute(f"compact vnodes in({index})")
+        start_time = time.time()
+        while True:
+            tdSql.query("show vgroups")
+            if tdSql.getData(0, 6) != 0:
+                tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+                break
+            run_time = time.time()-start_time
+            if run_time > 3:
+                tdLog.exit("compacting did not occur")
+            # time.sleep(0.1)
+
+        pass
+
+    def td5168insert(self):
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+        tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+        for i in range(5):
+            c1 = 1001.11 + i*0.1
+            c2 = 1001.11 + i*0.1 + 1*0.01
+            c3 = 1001.11 + i*0.1 + 2*0.01
+            c4 = 1001.11 + i*0.1 + 3*0.01
+            tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+        # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+        # for i in range(1000000):
+        for i in range(10000):
+            random1 = random.uniform(1000,1001)
+            random2 = random.uniform(1000,1001)
+            random3 = random.uniform(1000,1001)
+            random4 = random.uniform(1000,1001)
+            tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+        pass
+
+    def td5168(self):
+        tdLog.printNoPrefix("==========TD-5168==========")
+        # insert random values within a small range
+        tdLog.printNoPrefix("=====step0: insert data with the default settings========")
+        self.td5168insert()
+
+        # fetch the rows at the five fixed timestamps as baseline values; without lossy compression they must match exactly
+        for i in range(5):
+            tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+            # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+            for j in range(4):
+                locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+                print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+                tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # tdSql.query("select * from db.t1 limit 100,1")
+        # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 1000,1")
+        # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 10000,1")
+        # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 100000,1")
+        # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+        #
+        # tdSql.query("select * from db.t1 limit 1000000,1")
+        # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+        # stop the service and record the data size with lossy compression disabled
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+
+        cfgdir = self.getCfgDir()
+        cfgfile = self.getCfgFile()
+
+        lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+        data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+        dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+        lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+        tdLog.printNoPrefix(f"close the lossyColumns, data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+        ###################################################
+        float_lossy = "float"
+        double_lossy = "double"
"double" + float_double_lossy = "float|double" + no_loosy = "" + + double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}" + _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8") + + lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} " + lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} " + lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} " + lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} " + + ################################################### + + # 开启有损压缩,参数float,并启动服务插入数据 + tdLog.printNoPrefix("=====step1: lossyColumns设置为float========") + lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4) + for j in range(4): + # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1) + # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)]) + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为float情况下的数据容量 + tdDnodes.stop(index) + dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数double,并启动服务 + tdLog.printNoPrefix("=====step2: lossyColumns设置为double========") + lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为double情况下的数据容量 + tdDnodes.stop(index) + dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}") + + # 修改有损压缩,参数 float&&double ,并启动服务 + tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========") + lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8") + tdDnodes.start(index) + self.td5168insert() + + # 查询前面所述5个时间数据并与基准数值进行比较 + for i in range(5): + tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ") + for j in range(4): + tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)]) + + # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量 + tdDnodes.stop(index) + dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8")) + lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8") + tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}") + + if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) : + tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}") + tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}") + tdLog.printNoPrefix(f"When lossyColumns 
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.exit("lossy compression did not take effect")
+        else:
+            tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+            tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+            tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+            tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+            tdLog.printNoPrefix("lossy compression took effect")
+
+        pass
+
+    def td5433(self):
+        tdLog.printNoPrefix("==========TD-5433==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+        numtab=20000
+        for i in range(numtab):
+            sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+        tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+        tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+        tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+        tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+        tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+        tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 != 150")
+        tdSql.checkRows(numtab-1)
+        tdSql.query("select distinct t1 from stb1 where t1 = 150")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(numtab)
+
+        tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 != 2")
+        tdSql.checkRows(127)
+        tdSql.query("select distinct t0 from stb1 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb1")
+        tdSql.checkRows(128)
+
+        tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 != 200")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t1 from stb2 where t1 = 200")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t1 from stb2")
+        tdSql.checkRows(5)
+
+        tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 != 2")
+        tdSql.checkRows(4)
+        tdSql.query("select distinct t0 from stb2 where t0 = 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0 from stb2")
+        tdSql.checkRows(5)
+
+        pass
+
+    def td5798(self):
+        tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+        maxRemainderNum=7
+        tbnum=101
+        for i in range(tbnum-1):
+            sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+            tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+            tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+            tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+            tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+            tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+            tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+            tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+            tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+        tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+        tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+        tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+        tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+        tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+        tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+        tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+        tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+        tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+        tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+        #========== TD-5810 support distinct on multiple data columns ==========
+        tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum)
+        tdSql.query(f"select distinct c2 from stb1")
+        tdSql.checkRows(4)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum*3)
+        tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+        tdSql.checkRows(tbnum)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+        tdSql.checkRows(2)
+
+        tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(1)
+        tdSql.query(f"select distinct c2 from t1")
+        tdSql.checkRows(4)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c1 from t1 ")
+        tdSql.checkRows(2)
+        tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+        tdSql.checkRows(1)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+        tdSql.checkRows(1)
+
+        tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+        tdSql.checkRows(3)
+        tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+        tdSql.checkRows(2)
+
+        tdSql.error("select distinct c5 from stb1")
+        tdSql.error("select distinct c5 from t1")
+        tdSql.error("select distinct c1 from db.*")
+        tdSql.error("select c2, distinct c1 from stb1")
+        tdSql.error("select c2, distinct c1 from t1")
+        tdSql.error("select distinct c2 from ")
+        tdSql.error("distinct c2 from stb1")
+        tdSql.error("distinct c2 from t1")
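+        # distinct appears to be limited to two data columns here, so the
+        # three-column and cross-table forms below are all expected to fail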
tdSql.error("select distinct c1, c2, c3 from stb1") + tdSql.error("select distinct c1, c2, c3 from t1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1") + tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1") + tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1") + tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}") + tdSql.checkRows(tbnum*3) + tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from stb1 order by ts") + tdSql.error("select distinct c1, c2 from t1 order by ts") + tdSql.error("select distinct c1, ts from stb1 group by c2") + tdSql.error("select distinct c1, ts from t1 group by c2") + tdSql.error("select distinct c1, max(c2) from stb1 ") + tdSql.error("select distinct c1, max(c2) from t1 ") + tdSql.error("select max(c2), distinct c1 from stb1 ") + tdSql.error("select max(c2), distinct c1 from t1 ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)") + tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1") + tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1") + tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ") + tdSql.checkRows(6) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)") + tdSql.checkRows(15) + tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)") + tdSql.checkRows(3) + + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ") + tdSql.checkRows(3) + tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ") + tdSql.checkRows(0) + tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4") + tdSql.checkRows(3) + tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)") + # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)") + tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)") + # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)") + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )") + tdSql.checkRows(1) + tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )") + 
tdSql.checkRows(1)
+        tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+        tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+        # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+        # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+        #========== TD-5798 support distinct on multiple tag columns ==========
+        tdSql.query("select distinct t1 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t1 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t1, t0 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t1, t2 from stb1")
+        tdSql.checkRows(maxRemainderNum*2+1)
+        tdSql.query("select distinct t0, t1, t2 from stb1")
+        tdSql.checkRows(maxRemainderNum*2+1)
+        tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t0, t0 from stb1")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t0, t1 from t1")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0, t1 from t100num")
+        tdSql.checkRows(1)
+
+        tdSql.query("select distinct t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t2, t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t3, t2 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t4, t2 from stb2")
+        tdSql.checkRows(maxRemainderNum*3+1)
+        tdSql.query("select distinct t2, t3, t4 from stb2")
+        tdSql.checkRows(maxRemainderNum*3+1)
+        tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t3, t3, t3 from stb2")
+        tdSql.checkRows(maxRemainderNum+1)
+        tdSql.query("select distinct t2, t3 from t01")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t3, t4 from t0100num")
+        tdSql.checkRows(1)
+
+
+        ########## should be error #########
+        tdSql.error("select distinct from stb1")
+        tdSql.error("select distinct t3 from stb1")
+        tdSql.error("select distinct t1 from db.*")
+        tdSql.error("select distinct t2 from ")
+        tdSql.error("distinct t2 from stb1")
+        tdSql.error("select distinct stb1")
+        tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+        tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+        tdSql.error("select dist t0 from stb1")
+        tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+        tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+        tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+        ########## add where condition ##########
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+        tdSql.checkRows(3)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+        tdSql.checkRows(2)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+        tdSql.checkRows(1)
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+        tdSql.checkRows(3)
+        tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+        tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+        tdSql.checkRows(1)
+        tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+        tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+        tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+        tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
> 3 interval(1d) ") + tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)") + tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)") + + tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2") + tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2") + tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)") + tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)") + tdSql.checkRows(5) + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ") + tdSql.checkRows(4) + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ") + tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)") + tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)") + tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.checkRows(1) + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3") + tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3") + tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3") + tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4") + tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ") + + pass + + def td5935(self): + tdLog.printNoPrefix("==========TD-5935==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + + tdSql.execute("use db") + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)") + nowtime=int(round((time.time()*1000))) + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})" + tdSql.execute(sql) + for j in range(1000): + tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})") + tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ") + + ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ########## + stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10" + tdSql.query(stddevAndIntervalSql) + tdSql.checkRows(10) + + ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ########## + fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10" + tdSql.query(fillsql) + fillResult=False + if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None): + fillResult=True + if fillResult: + tdLog.success(f"sql is :{fillsql}, fill(next) is correct") + else: + tdLog.exit("fill(next) is wrong") + + pass + + def td6068(self): + tdLog.printNoPrefix("==========TD-6068==========") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 3650") + tdSql.execute("use db") + + tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, 
c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)") + + for i in range(100): + sql = f"create table db.t{i} using db.stb1 tags({i})" + tdSql.execute(sql) + tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)") + tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )") + tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)") + tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)") + + tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1") + tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1") + tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1") + tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1") + tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1") + tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1") + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + 
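+        # bottom()/top() accept only numeric columns, so timestamp (c3),
+        # binary (c4), bool (c6) and tag columns are expected to be rejected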
tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, diff(c1) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.checkCols(4) + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.query("select ts as t, diff(c1) from t1") + tdSql.error("select ts as t, diff(c1) from stb1") + tdSql.query("select ts as t, diff(c2) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c3) from t1") + tdSql.error("select ts as t, diff(c4) from t1") + tdSql.query("select ts as t, diff(c5) from t1") + tdSql.checkRows(5) + tdSql.error("select ts as t, diff(c6) from t1") + tdSql.error("select ts as t, diff(t1) from t1") + tdSql.error("select ts as t, diff(c1, c2) from t1") + + tdSql.error("select ts as t, bottom(c1, 0) from t1") + tdSql.query("select ts as t, bottom(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, bottom(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, bottom(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, bottom(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c3, 5) from t1") + tdSql.error("select ts as t, bottom(c4, 5) from t1") + tdSql.query("select ts as t, bottom(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, bottom(c6, 5) from t1") + tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, bottom(t1, 1) from t1") + tdSql.error("select ts as t, bottom(t1, 1) from stb1") + tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by 
c3") + tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3") + + + tdSql.error("select ts as t, top(c1, 0) from t1") + tdSql.query("select ts as t, top(c1, 5) from t1") + tdSql.checkRows(5) + tdSql.checkCols(3) + for i in range(5): + data=tdSql.getData(i, 0) + tdSql.checkData(i, 1, data) + tdSql.query("select ts as t, top(c1, 5) from stb1") + tdSql.checkRows(5) + tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname") + tdSql.checkRows(500) + tdSql.query("select ts as t, top(c1, 8) from t1") + tdSql.checkRows(6) + tdSql.query("select ts as t, top(c2, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c3, 5) from t1") + tdSql.error("select ts as t, top(c4, 5) from t1") + tdSql.query("select ts as t, top(c5, 8) from t1") + tdSql.checkRows(6) + tdSql.error("select ts as t, top(c6, 5) from t1") + tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b") + tdSql.error("select ts as t, top(t1, 1) from t1") + tdSql.error("select ts as t, top(t1, 1) from stb1") + tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3") + tdSql.error("select ts as t, top(t1, 3) from t1 order by c3") + pass + + def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""): + + ''' + apercentile function: + :param col: string, column name, required parameters; + :param p: float, percentile interval, [0,100], required parameters; + :param algo: string, alforithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]}; + :param alias: string, result column another name; + :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters; + :param condition: expression; + :param args: other funtions,like: ', last(col)' + :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1 + ''' + + return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}" + + def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ): + + tdSql.query(f"select count({col}) from {table_expr} {condition}") + if tdSql.queryRows == 0: + tdSql.query(self.apercentile_query_form( + col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + tdSql.checkRows(0) + return + + pset = [0, 40, 60, 100] + if p not in pset: + pset.append(p) + + if "stb" in table_expr: + tdSql.query(f"select spread({col}) from stb1") + else: + tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)") + spread_num = tdSql.getData(0, 0) + + for pi in pset: + + if "group" in condition: + tdSql.query(f"select last_row({col}) from {table_expr} {condition}") + query_result = tdSql.queryResult + query_rows = tdSql.queryRows + for i in range(query_rows): + pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0] + tbname = query_result[i][-1] + tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}") + print(tdSql.sql) + pre_data = tdSql.getData(0, 0) + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + )) + if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + tdSql.checkDeviaRation(i, 0, pre_data, 0.1) + else: + devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect 
data:{pre_data}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + # if "group" in condition: + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + # )) + # query_result = tdSql.queryResult + # query_rows = tdSql.queryRows + # tdSql.query(self.apercentile_query_form( + # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition + # )) + # for i in range(query_rows): + # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02): + # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1) + # else: + # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02)) + # if devia < 0.5: + # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} <= expect deviation: 0.01") + # else: + # tdLog.exit( + # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, " + # f"actual deviation:{devia} > expect deviation: 0.01") + + else: + if ',' in alias or not alias: + tdSql.query(f"select {col} from {table_expr} {condition}") + elif "stb" not in table_expr: + tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}") + else: + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition + )) + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query(self.apercentile_query_form( + col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition + )) + + if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02): + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1) + else: + devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02)) + if devia < 0.5: + tdLog.info( + f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} <= expect deviation: 0.01") + else: + tdLog.exit( + f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, " + f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, " + f"actual deviation:{devia} > expect deviation: 0.01") + + + def apercentile_query(self): + + # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool + # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16) + + # case1: int col + self.checkapert() + # case2: float col + case2 = {'col':'c2'} + self.checkapert(**case2) + # case3: double col + case3 = {'col':'c5'} + self.checkapert(**case3) + # case4: bigint col + case4 = {'col':'c7'} + self.checkapert(**case4) + # case5: smallint col + case5 = {'col':'c8'} + self.checkapert(**case5) + # case6: tinyint col + case6 = {'col':'c9'} + self.checkapert(**case6) + # case7: stable + case7 = {'table_expr':'stb1'} + self.checkapert(**case7) + # case8: nest query, outquery + case8 = {'table_expr':'(select c1 from t1)'} + self.checkapert(**case8) + # case9: nest query, inquery and out query + case9 = {'table_expr':'(select apercentile(c1, 0) 
as c1 from t1)'} + self.checkapert(**case9) + + # case10: nest query, inquery + tdSql.query("select * from (select c1 from stb1)") + if tdSql.queryRows == 0: + tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)") + tdSql.checkRows(0) + else: + query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] + tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1) + tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1) + tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)") + tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1) + + # case11: no algorithm = algo:0 + case11 = {'com':'', 'algo': ''} + self.checkapert(**case11) + + # case12~14: p: bin/oct/hex + case12 = {'p': 0b1100100} + self.checkapert(**case12) + case13 = {'algo':'"T-DIGEST"'} + self.checkapert(**case13) + case14 = {'p':0x32, 'algo':'"DEFAULT"'} + self.checkapert(**case14) + + # case15~21: mix with aggregate function + case15 = {'alias':', count(*)'} + self.checkapert(**case15) + case16 = {'alias':', avg(c1)'} + self.checkapert(**case16) + case17 = {'alias':', twa(c1)'} + self.checkapert(**case17) + case18 = {'alias':', irate(c1)'} + self.checkapert(**case18) + case19 = {'alias':', sum(c1)'} + self.checkapert(**case19) + case20 = {'alias':', stddev(c1)'} + self.checkapert(**case20) + case21 = {'alias':', leastsquares(c1, 1, 1)'} + self.checkapert(**case21) + + # case22~27:mix with selector function + case22 = {'alias':', min(c1)'} + self.checkapert(**case22) + case23 = {'alias':', max(c1)'} + self.checkapert(**case23) + case24 = {'alias':', first(c1)'} + self.checkapert(**case24) + case25 = {'alias':', last(c1)'} + self.checkapert(**case25) + case26 = {'alias':', percentile(c1, 0)'} + self.checkapert(**case26) + case27 = {'alias':', apercentile(c1, 0, "t-digest")'} + self.checkapert(**case27) + + # case28~29: mix with computing function + case28 = {'alias':', spread(c1)'} + self.checkapert(**case28) + # case29: mix with four operation + case29 = {'alias':'+ spread(c1)'} + self.checkapert(**case29) + + # case30~36: with condition + case30 = {'condition':'where ts > now'} + self.checkapert(**case30) + case31 = {'condition':'where c1 between 1 and 200'} + self.checkapert(**case31) + case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'} + self.checkapert(**case32) + case33 = {'condition':'where c1>100 and c2<100'} + self.checkapert(**case33) + case34 = {'condition':'where c1 is not null'} + self.checkapert(**case34) + case35 = {'condition':'where c4 like "_inary%"'} + self.checkapert(**case35) + case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'} + self.checkapert(**case36) + + # case37~38: with join + case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'} + self.checkapert(**case37) + case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'} + self.checkapert(**case38) + + # case39: with group by + case39 = {'table_expr':'stb1', 'condition':'group by tbname'} + self.checkapert(**case39) + + # case40: with slimit + case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'} + self.checkapert(**case40) + + # case41: with soffset + case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'} + self.checkapert(**case41) + + # case42: with order by + case42 = 
{'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44: with limit offset
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+
+        # unusual test
+        #
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+        #
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))  # no col, no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))  # no col, with algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))  # no p, no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))  # no p, with algorithm
+        tdSql.error("apercentile( c1, 100) from t1")  # no select
+        tdSql.error("select apercentile from t1")  # no algorithm condition
+        tdSql.error("select apercentile c1,0 from t1")  # no brackets
+        tdSql.error("select apercentile (c1,0) t1")  # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p, no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")  # no table_expr
+        tdSql.error("select apercentile{ (c1,0) } from t1")  # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ] from t1")  # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ] from t1")  # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")  # sql form error 5
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")  # sql form error 6
+        tdSql.error("select apercentile(c1, (0), 1) from t1")  # sql form error 7
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")  # sql form error 8
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")  # sql form error 9
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")  # sql form error 10
+        tdSql.error("select apercentile([c1, 0]) from t1")  # sql form error 11
+        tdSql.error("select apercentile({c1, 0}) from t1")  # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")  # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")  # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")  # args: 4
+        tdSql.error("select apercentile() from t1")  # args: null 1
+        tdSql.error("select apercentile from t1")  # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")  # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))  # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))  # col: tag column
+        tdSql.error(self.apercentile_query_form(col=123))  # col: numerical
+        tdSql.error(self.apercentile_query_form(col=True))  # col: bool
+        tdSql.error(self.apercentile_query_form(col=''))  # col: ''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))  # col: expr
+        tdSql.error(self.apercentile_query_form(col="t%"))  # col: non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))  # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))  # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))  # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))  # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))  # p: bool
+        tdSql.error(self.apercentile_query_form(p='a'))  # p: str
tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr + tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp + tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str + tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm:float + tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool + tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:bool + tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp + tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr + + # boundary test + tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100] + tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint + tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100] + tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint + tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1] + tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint + + # mix function test + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function + tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with bottom function + tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function + tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function + tdSql.error(self.apercentile_query_form(alias=', *')) # mix with * + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function + tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function + tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function + tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation + + def apercentile_data(self, tbnum, data_row, basetime): + for i in range(tbnum): + for j in range(data_row): + tdSql.execute( + f"insert into t{i} values (" + f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, " + f"'binary_{j}', {random.uniform(-200, 
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
+    def td6108(self):
+        tdLog.printNoPrefix("==========TD-6108==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+            ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+            c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+            ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        tbnum = 10
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        tdLog.printNoPrefix("######## no data test:")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data test:")
+        nowtime = int(round(time.time() * 1000))
+        per_table_rows = 1000
+        self.apercentile_data(tbnum, per_table_rows, nowtime)
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data with NULL test:")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## check after WAL test:")
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        self.apercentile_query()
+        self.error_apercentile()
+
+    def run(self):
+
+        # master branch
+        self.td6108()
+
+        # self.td5168()
+        # self.td5433()
+        # self.td5798()
+
+        # develop branch
+        # self.td4889()
+        # In the scenario where vnode/wal/wal* files exist but the vnode has no
+        # meta/data, the status is reset to 0 for now.
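+
+        # For readability, a minimal sketch of the query builder that the
+        # error cases in error_apercentile() assume (hypothetical
+        # reconstruction; the real apercentile_query_form helper is defined
+        # earlier in this file and its signature/defaults may differ):
+        #
+        #   def apercentile_query_form(self, col="c1", p=0, com=",", algo="'default'",
+        #                              alias="", table_expr="t1", condition=""):
+        #       return f"select apercentile({col}{com} {p}{com} {algo}) {alias} " \
+        #              f"from {table_expr} {condition}"
+        #
+        # e.g. apercentile_query_form(algo="'t-digest'") would render as:
+        #   select apercentile(c1, 0, 't-digest')  from t1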
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/insert/special_character_show.py b/tests/pytest/insert/special_character_show.py
index 3b2df5c87380c22fb18cbee06c866249b4365a70..ce9f1de76aa5896beb3aa78dce8a3a65a81a973c 100644
--- a/tests/pytest/insert/special_character_show.py
+++ b/tests/pytest/insert/special_character_show.py
@@ -31,9 +31,8 @@ class TDTestCase:
         tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
         tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
 
-        tdLog.info('=============== step2,create table增加了转义字符')
+        tdLog.info('=============== step2,create table with escape character')
         tdLog.info('create table tb1 using stb1 tags("abc\\"def")')
-        #增加了转义字符\
         tdSql.execute('create table tb1 using stb1 tags("abc\\"def")')
 
         tdLog.info('=============== step3,insert data')
diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py
index f22cfcd4ec709b1d4440065fab398979afeb3adc..e5c8868ad4d54e32e3458ebb02e4f1118d57c6c9 100644
--- a/tests/pytest/query/operator_cost.py
+++ b/tests/pytest/query/operator_cost.py
@@ -25,8 +25,7 @@ class TDTestCase:
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
 
-        now = time.time()
-        self.ts = int(round(now * 1000))
+        self.ts = 1633333333000
         self.num = 10
 
     def run(self):
@@ -534,4 +533,4 @@ class TDTestCase:
 
 tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/query.py b/tests/pytest/query/query.py
index 1d9d6e5ea4d5c41c13222ceb4e23b165f0062837..8e79fc5f686d77aa276da5bca7d9493ff1a00ffb 100644
--- a/tests/pytest/query/query.py
+++ b/tests/pytest/query/query.py
@@ -149,6 +149,22 @@ class TDTestCase:
         tdLog.info("case for bug_6387")
         self.bug_6387()
 
+        # JIRA TS-583
+        tdLog.info("case for JIRA TS-583")
+        tdSql.execute("create database test2")
+        tdSql.execute("use test2")
+        tdSql.execute("create table stb(ts timestamp, c1 int) tags(t1 binary(120))")
+        tdSql.execute("create table t0 using stb tags('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')")
+
+        tdSql.query("show create table t0")
+        tdSql.checkRows(1)
+
+        tdSql.execute("create table stb2(ts timestamp, c1 int) tags(t1 nchar(120))")
+        tdSql.execute("create table t1 using stb2 tags('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')")
+
+        tdSql.query("show create table t1")
+        tdSql.checkRows(1)
+
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 2d854643b8a2980bf38d4aacc3c20ab8843abdf8..55c964c2557eff3204cf31bfb63cd5e3f3dd5501 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -275,7 +275,7 @@ class TDDnode:
             tdLog.info("taosd found in %s" % buildPath)
 
         binPath = buildPath + "/build/bin/taosd"
-        blm3BinPath = buildPath + "/build/bin/blm3"
+        taosadapterBinPath = buildPath + "/build/bin/taosadapter"
 
         if self.deployed == 0:
             tdLog.exit("dnode:%d is not deployed" % (self.index))
@@ -291,10 +291,10 @@
 
         print(cmd)
 
-        blm3Cmd = "nohup %s > /dev/null 2>&1 & " % (
-            blm3BinPath)
-        if os.system(blm3Cmd) != 0:
-            tdLog.exit(blm3Cmd)
+        taosadapterCmd = "nohup %s > /dev/null 2>&1 & " % (
+            taosadapterBinPath)
+        if os.system(taosadapterCmd) != 0:
+            tdLog.exit(taosadapterCmd)
 
         if os.system(cmd) != 0:
             tdLog.exit(cmd)
@@ -340,7 +340,7 @@
             tdLog.info("taosd found in %s" % buildPath)
 
         binPath = buildPath + "/build/bin/taosd"
-        blm3BinPath = buildPath + "/build/bin/blm3"
+        taosadapterBinPath = buildPath + "/build/bin/taosadapter"
 
         if self.deployed == 0:
             tdLog.exit("dnode:%d is not deployed" % (self.index))
@@ -356,9 +356,9 @@
 
         print(cmd)
 
-        blm3Cmd = "%s > /dev/null 2>&1 & " % (blm3BinPath)
-        if os.system(blm3Cmd) != 0:
-            tdLog.exit(blm3Cmd)
+        taosadapterCmd = "%s > /dev/null 2>&1 & " % (taosadapterBinPath)
+        if os.system(taosadapterCmd) != 0:
+            tdLog.exit(taosadapterCmd)
 
         if os.system(cmd) != 0:
             tdLog.exit(cmd)
@@ -366,18 +366,18 @@
         tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
 
     def stop(self):
-        blm3ToBeKilled = "blm3"
+        taosadapterToBeKilled = "taosadapter"
 
-        blm3PsCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % blm3ToBeKilled
-        blm3ProcessID = subprocess.check_output(
-            blm3PsCmd, shell=True).decode("utf-8")
+        taosadapterPsCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % taosadapterToBeKilled
+        taosadapterProcessID = subprocess.check_output(
+            taosadapterPsCmd, shell=True).decode("utf-8")
 
-        while(blm3ProcessID):
-            blm3KillCmd = "kill -INT %s > /dev/null 2>&1" % blm3ProcessID
-            os.system(blm3KillCmd)
+        while(taosadapterProcessID):
+            taosadapterKillCmd = "kill -INT %s > /dev/null 2>&1" % taosadapterProcessID
+            os.system(taosadapterKillCmd)
             time.sleep(1)
-            blm3ProcessID = subprocess.check_output(
-                blm3PsCmd, shell=True).decode("utf-8")
+            taosadapterProcessID = subprocess.check_output(
+                taosadapterPsCmd, shell=True).decode("utf-8")
 
         if self.valgrind == 0:
             toBeKilled = "taosd"
diff --git a/tests/script/general/compute/csum.sim b/tests/script/general/compute/csum.sim
index 4d6f748566fdfedc3b6ac2ccf5fa6a22c7a5340f..b350e4f403a02702741e0f10ab91fb9799e776d3 100644
--- a/tests/script/general/compute/csum.sim
+++ b/tests/script/general/compute/csum.sim
@@ -100,6 +100,76 @@ if $data11 != -2 then
   return -1
 endi
 
+print ==========>TD10758
+sql create stable st(ts timestamp, c1 int) tags(t int);
+sql create table ct1 using st tags(1)
+sql insert into ct1 values(now, 1)(now+1s, 2)(now+2s, 3)
+sql select csum(c1),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select csum(c1),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select diff(c1),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select diff(c1),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select mavg(c1,2),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select mavg(c1,2),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select derivative(c1,1s,0),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+sql select derivative(c1,1s,0),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+  return -1
+endi
+if $data14 != 1 then
+  return -1
+endi
+
+
 print =============== clear
 sql drop database $db
 sql show databases
diff --git a/tests/test/c/createNormalTable.c b/tests/test/c/createNormalTable.c
index 60253e2add1ebaa1e6c2c00b073cf13672789346..0dad7eb9b68a5584f4f6347c74b8266299c03da4 100644
--- a/tests/test/c/createNormalTable.c
+++ b/tests/test/c/createNormalTable.c
@@ -233,5 +233,5 @@ void shellParseArgument(int argc, char *argv[]) {
   pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC);
   pPrint("%s replica:%d %s", GREEN, replica, NC);
 
-  pPrint("%s start create table performace test %s", GREEN, NC);
+  pPrint("%s start create table performance test %s", GREEN, NC);
 }
diff --git a/tests/test/c/createTablePerformance.c b/tests/test/c/createTablePerformance.c
index b94c687f2cba7310949b0a3b12b6f4fc007e5a9a..0e81279819ec8c1c1c0e5601a24193823997c914 100644
--- a/tests/test/c/createTablePerformance.c
+++ b/tests/test/c/createTablePerformance.c
@@ -221,5 +221,5 @@ void shellParseArgument(int argc, char *argv[]) {
   pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC);
   pPrint("%s replica:%d %s", GREEN, replica, NC);
 
-  pPrint("%s start create table performace test %s", GREEN, NC);
+  pPrint("%s start create table performance test %s", GREEN, NC);
 }
diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h
index 2e19dde3d9c52c20705d131f471a2e0e389589e4..d9a7e13138b125f92d0611614f74abf268eaee70 100644
--- a/tests/tsim/inc/sim.h
+++ b/tests/tsim/inc/sim.h
@@ -135,7 +135,7 @@ typedef struct _script_t {
   int32_t   numOfLines;  // number of lines in the script
   int32_t   bgScriptLen;
   char      fileName[MAX_FILE_NAME_LEN];  // script file name
-  char      error[MAX_ERROR_LEN];
+  char      error[TSDB_MAX_BINARY_LEN + 100];
   char *    optionBuffer;
   SCmdLine *lines;  // command list
   SVariable variables[MAX_VAR_LEN];
@@ -178,4 +178,4 @@ bool simExecuteLineInsertCmd(SScript *script, char *option);
 bool simExecuteLineInsertErrorCmd(SScript *script, char *option);
 void simVisuallizeOption(SScript *script, char *src, char *dst);
 
-#endif
\ No newline at end of file
+#endif