diff --git a/.gitmodules b/.gitmodules
index dbb02d4ef7ed65d11418e271cac7e61b95c2a482..7edcdff5d3dd805ec6b222915688940c7bd7dcb9 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -16,6 +16,9 @@
[submodule "deps/TSZ"]
path = deps/TSZ
url = https://github.com/taosdata/TSZ.git
-[submodule "src/plugins/blm3"]
- path = src/plugins/blm3
- url = https://github.com/taosdata/blm3
+[submodule "deps/avro"]
+ path = deps/avro
+ url = https://github.com/apache/avro
+[submodule "src/plugins/taosadapter"]
+ path = src/plugins/taosadapter
+ url = https://github.com/taosdata/taosadapter
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 489cb5b197a0d4a1e09f8167a435cce382148fec..547455d07b6ba25ac58ae5e4851c5cd5b08e3c60 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -15,9 +15,30 @@ ELSE ()
CMAKE_MINIMUM_REQUIRED(VERSION 3.0)
ENDIF ()
+if(NOT WIN32)
+ string(ASCII 27 Esc)
+ set(ColourReset "${Esc}[m")
+ set(ColourBold "${Esc}[1m")
+ set(Red "${Esc}[31m")
+ set(Green "${Esc}[32m")
+ set(Yellow "${Esc}[33m")
+ set(Blue "${Esc}[34m")
+ set(Magenta "${Esc}[35m")
+ set(Cyan "${Esc}[36m")
+ set(White "${Esc}[37m")
+ set(BoldRed "${Esc}[1;31m")
+ set(BoldGreen "${Esc}[1;32m")
+ set(BoldYellow "${Esc}[1;33m")
+ set(BoldBlue "${Esc}[1;34m")
+ set(BoldMagenta "${Esc}[1;35m")
+ set(BoldCyan "${Esc}[1;36m")
+ set(BoldWhite "${Esc}[1;37m")
+endif()
+
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
+SET(TD_USB_DONGLE FALSE)
SET(TD_MQTT FALSE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_STORAGE FALSE)
diff --git a/Jenkinsfile b/Jenkinsfile
index a715cf347a711bc11c05c05b89f0d18b8cb96063..9cc65d24f8aae3a97890e6676ff1091d32f7dc59 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -107,7 +107,147 @@ def pre_test(){
make > /dev/null
make install > /dev/null
cd ${WKC}/tests
- pip3 install ${WKC}/src/connector/python/
+ pip3 install ${WKC}/src/connector/python/
+ '''
+ return 1
+}
+def pre_test_noinstall(){
+ sh'hostname'
+ sh'''
+ cd ${WKC}
+ git reset --hard HEAD~10 >/dev/null
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ cd ${WKC}
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WKC}
+ git checkout 2.0
+ '''
+ }
+ else{
+ sh '''
+ cd ${WKC}
+ git checkout develop
+ '''
+ }
+ }
+ sh'''
+ cd ${WKC}
+ git pull >/dev/null
+ git fetch origin +refs/pull/${CHANGE_ID}/merge
+ git checkout -qf FETCH_HEAD
+ git clean -dfx
+ git submodule update --init --recursive
+ cd ${WK}
+ git reset --hard HEAD~10
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ cd ${WK}
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WK}
+ git checkout 2.0
+ '''
+ }
+ else{
+ sh '''
+ cd ${WK}
+ git checkout develop
+ '''
+ }
+ }
+ sh '''
+ cd ${WK}
+ git pull >/dev/null
+
+ export TZ=Asia/Harbin
+ date
+ git clean -dfx
+ mkdir debug
+ cd debug
+ cmake .. > /dev/null
+ make
+ '''
+ return 1
+}
+def pre_test_mac(){
+ sh'hostname'
+ sh'''
+ cd ${WKC}
+ git reset --hard HEAD~10 >/dev/null
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ cd ${WKC}
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WKC}
+ git checkout 2.0
+ '''
+ }
+ else{
+ sh '''
+ cd ${WKC}
+ git checkout develop
+ '''
+ }
+ }
+ sh'''
+ cd ${WKC}
+ git pull >/dev/null
+ git fetch origin +refs/pull/${CHANGE_ID}/merge
+ git checkout -qf FETCH_HEAD
+ git clean -dfx
+ git submodule update --init --recursive
+ cd ${WK}
+ git reset --hard HEAD~10
+ '''
+ script {
+ if (env.CHANGE_TARGET == 'master') {
+ sh '''
+ cd ${WK}
+ git checkout master
+ '''
+ }
+ else if(env.CHANGE_TARGET == '2.0'){
+ sh '''
+ cd ${WK}
+ git checkout 2.0
+ '''
+ }
+ else{
+ sh '''
+ cd ${WK}
+ git checkout develop
+ '''
+ }
+ }
+ sh '''
+ cd ${WK}
+ git pull >/dev/null
+
+ export TZ=Asia/Harbin
+ date
+ git clean -dfx
+ mkdir debug
+ cd debug
+ cmake .. > /dev/null
+ cmake --build .
'''
return 1
}
@@ -179,9 +319,9 @@ def pre_test_win(){
git clean -dfx
mkdir debug
cd debug
- call "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\vcvarsall.bat" amd64
+ call "C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsall.bat" amd64
cmake ../ -G "NMake Makefiles"
- nmake || exit 8
+ set "CL=/MP" && nmake || exit 8
nmake install || exit 8
xcopy /e/y/i/f C:\\workspace\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32 || exit 8
cd C:\\workspace\\TDinternal\\community\\src\\connector\\python
@@ -201,8 +341,8 @@ pipeline {
stage('pre_build'){
agent{label 'master'}
options { skipDefaultCheckout() }
- when{
- changeRequest()
+ when {
+ changeRequest()
}
steps {
script{
@@ -322,21 +462,9 @@ pipeline {
'''
sh '''
- cd ${WKC}/src/connector/node-rest/
- npm install
- npm run build
- npm run build:test
- npm run test
-
- '''
-
- sh '''
- cd ${WKC}/tests/examples/C#/taosdemo
- mcs -out:taosdemo *.cs > /dev/null 2>&1
- echo '' |./taosdemo -c /etc/taos
- cd ${WKC}/tests/connectorTest/C#Test/nanosupport
- mcs -out:nano *.cs > /dev/null 2>&1
- echo '' |./nano
+ cd ${WKC}/tests/examples/C#/taosdemo
+ mcs -out:taosdemo *.cs > /dev/null 2>&1
+ echo '' |./taosdemo -c /etc/taos
'''
sh '''
cd ${WKC}/tests/gotest
@@ -413,7 +541,7 @@ pipeline {
stage('test_b4_s7') {
agent{label " slave7 || slave17 "}
steps {
- timeout(time: 55, unit: 'MINUTES'){
+ timeout(time: 105, unit: 'MINUTES'){
pre_test()
sh '''
date
@@ -469,36 +597,96 @@ pipeline {
}
}
}
-
- // stage('build'){
- // agent{label " wintest "}
- // steps {
- // pre_test()
- // script{
- // while(win_stop == 0){
- // sleep(1)
- // }
- // }
- // }
- // }
- // stage('test'){
- // agent{label "win"}
- // steps{
+ stage('arm64centos7') {
+ agent{label " arm64centos7 "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('arm64centos8') {
+ agent{label " arm64centos8 "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('arm32bionic') {
+ agent{label " arm32bionic "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('arm64bionic') {
+ agent{label " arm64bionic "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('arm64focal') {
+ agent{label " arm64focal "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('centos7') {
+ agent{label " centos7 "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('ubuntu:trusty') {
+ agent{label " trusty "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('ubuntu:xenial') {
+ agent{label " xenial "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('ubuntu:bionic') {
+ agent{label " bionic "}
+ steps {
+ pre_test_noinstall()
+ }
+ }
+ stage('Mac_build') {
+ agent{label " catalina "}
+ steps {
+ pre_test_mac()
+ }
+ }
+
+ stage('build'){
+ agent{label " wintest "}
+ steps {
+ pre_test()
+ script{
+ while(win_stop == 0){
+ sleep(1)
+ }
+ }
+ }
+ }
+ stage('test'){
+ agent{label "win"}
+ steps{
- // catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
- // pre_test_win()
- // timeout(time: 20, unit: 'MINUTES'){
- // bat'''
- // cd C:\\workspace\\TDinternal\\community\\tests\\pytest
- // .\\test-all.bat Wintest
- // '''
- // }
- // }
- // script{
- // win_stop=1
- // }
- // }
- // }
+ catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
+ pre_test_win()
+ timeout(time: 20, unit: 'MINUTES'){
+ bat'''
+ cd C:\\workspace\\TDinternal\\community\\tests\\pytest
+ .\\test-all.bat wintest
+ '''
+ }
+ }
+ script{
+ win_stop=1
+ }
+ }
+ }
}
diff --git a/README.md b/README.md
index c821bdc031fc3125e7afdfd2f8a9c2878e51f505..edca04afd486687ea8653e955ae50da457f77ab9 100644
--- a/README.md
+++ b/README.md
@@ -129,7 +129,7 @@ mkdir debug && cd debug
cmake .. && cmake --build .
```
-Note TDengine 2.3.0.0 and later use a component named 'blm3' to play http daemon role by default instead of the http daemon embedded in the early version of TDengine. The blm3 is programmed by go language. If you pull TDengine source code to the latest from an existing codebase, please execute 'git submodule update --init --recursive' to pull blm3 source code. Please install go language 1.14 or above for compiling blm3. If you meet difficulties regarding 'go mod', especially you are from China, you can use a proxy to solve the problem.
+Note TDengine 2.3.x.0 and later use a component named 'taosadapter' to play http daemon role by default instead of the http daemon embedded in the early version of TDengine. The taosadapter is programmed by go language. If you pull TDengine source code to the latest from an existing codebase, please execute 'git submodule update --init --recursive' to pull taosadapter source code. Please install go language 1.14 or above for compiling taosadapter. If you meet difficulties regarding 'go mod', especially you are from China, you can use a proxy to solve the problem.
```
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
diff --git a/cmake/define.inc b/cmake/define.inc
index e0cdfd3efc6be2673dc60a53f035e132f5a20a55..b381853eba57aa7b9efb905790e77b1d1fdcf900 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
IF (TD_ACCOUNT)
@@ -13,6 +13,10 @@ IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
+IF (TD_USB_DONGLE)
+ ADD_DEFINITIONS(-D_USB_DONGLE)
+ENDIF ()
+
IF (TD_MQTT)
ADD_DEFINITIONS(-D_MQTT)
ENDIF ()
@@ -117,14 +121,13 @@ IF (TD_MIPS_32)
SET(COMMON_FLAGS "-Wall -Werror -fPIC -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
-IF (TD_APLHINE)
+IF (TD_ALPINE)
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
MESSAGE(STATUS "aplhine is defined")
ENDIF ()
-MESSAGE("before BUILD_HTTP: " ${BUILD_HTTP})
IF ("${BUILD_HTTP}" STREQUAL "")
IF (TD_LINUX)
IF (TD_ARM_32)
@@ -136,16 +139,27 @@ IF ("${BUILD_HTTP}" STREQUAL "")
SET(BUILD_HTTP "true")
ENDIF ()
ENDIF ()
-MESSAGE("after BUILD_HTTP: " ${BUILD_HTTP})
IF (${BUILD_HTTP} MATCHES "true")
SET(TD_BUILD_HTTP TRUE)
+ELSEIF (${BUILD_HTTP} MATCHES "false")
+ SET(TD_BUILD_HTTP FALSE)
ENDIF ()
IF (TD_BUILD_HTTP)
ADD_DEFINITIONS(-DHTTP_EMBEDDED)
ENDIF ()
+IF ("${AVRO_SUPPORT}" MATCHES "true")
+ SET(TD_AVRO_SUPPORT TRUE)
+ELSEIF ("${AVRO_SUPPORT}" MATCHES "false")
+ SET(TD_AVRO_SUPPORT FALSE)
+ENDIF ()
+
+IF (TD_AVRO_SUPPORT)
+ ADD_DEFINITIONS(-DAVRO_SUPPORT)
+ENDIF ()
+
IF (TD_LINUX)
ADD_DEFINITIONS(-DLINUX)
ADD_DEFINITIONS(-D_LINUX)
@@ -158,11 +172,14 @@ IF (TD_LINUX)
ENDIF ()
IF (TD_MEMORY_SANITIZER)
- SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
- MESSAGE(STATUS "memory sanitizer detected as true")
+ IF (TD_ARCHLINUX)
+ SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
+ ELSE ()
+ SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
+ ENDIF ()
+ MESSAGE(STATUS "${BoldRed}Will compile with memory sanitizer! ${ColourReset}")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
- MESSAGE(STATUS "memory sanitizer detected as false")
ENDIF ()
SET(RELEASE_FLAGS "-O3 -Wno-error")
diff --git a/cmake/env.inc b/cmake/env.inc
index 5ee0b2983c0394c3e3aad26a622bdd2e6247c4be..1c594cd4be229cf259d76f9612b35fafde46221c 100755
--- a/cmake/env.inc
+++ b/cmake/env.inc
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
SET(CMAKE_C_STANDARD 11)
diff --git a/cmake/input.inc b/cmake/input.inc
index 5bd1a7bed6fe9b0c7dc51c46870d8109462eae81..0812711a5824ce0b328374fcdd04fc5f229ad01c 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
IF (${ACCOUNT} MATCHES "true")
@@ -92,6 +92,8 @@ ENDIF ()
SET(TD_BUILD_HTTP FALSE)
+SET(TD_AVRO_SUPPORT FALSE)
+
SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
diff --git a/cmake/install.inc b/cmake/install.inc
index 9dfe8d0ac6c4dd73b090c60605595f6be3abc478..c90aa3f9511e416106309e603853028e7096f082 100755
--- a/cmake/install.inc
+++ b/cmake/install.inc
@@ -1,8 +1,7 @@
IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
- INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
- INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
+ INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Linux ${TD_VER_NUMBER})")
ELSEIF (TD_WINDOWS)
IF (TD_POWER)
SET(CMAKE_INSTALL_PREFIX C:/PowerDB)
@@ -36,11 +35,10 @@ ELSEIF (TD_WINDOWS)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
- INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.35-dist.jar DESTINATION connector/jdbc)
+ INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.36-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
- INSTALL(CODE "execute_process(COMMAND chmod 777 ${TD_MAKE_INSTALL_SH})")
- INSTALL(CODE "execute_process(COMMAND ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin ${TD_VER_NUMBER})")
+ INSTALL(CODE "execute_process(COMMAND bash ${TD_MAKE_INSTALL_SH} ${TD_COMMUNITY_DIR} ${PROJECT_BINARY_DIR} Darwin ${TD_VER_NUMBER})")
ENDIF ()
diff --git a/cmake/platform.inc b/cmake/platform.inc
index a78082a1fc62a8ad66c54dcf005e3e15edf5f5f0..328c5f23ee95af54daa7e4a925c33ce09acd3cfb 100755
--- a/cmake/platform.inc
+++ b/cmake/platform.inc
@@ -1,4 +1,4 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
#
@@ -21,7 +21,7 @@ SET(TD_LINUX FALSE)
SET(TD_ARM_32 FALSE)
SET(TD_MIPS_64 FALSE)
SET(TD_MIPS_32 FALSE)
- SET(TD_APLHINE FALSE)
+ SET(TD_ALPINE FALSE)
SET(TD_NINGSI FALSE)
SET(TD_NINGSI_60 FALSE)
SET(TD_NINGSI_80 FALSE)
@@ -36,7 +36,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# Get OS information and store in variable TD_OS_INFO.
#
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh)
- execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
+ execute_process(COMMAND sh ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
MESSAGE(STATUS "The current os is " ${TD_OS_INFO})
SET(TD_LINUX TRUE)
@@ -52,8 +52,13 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF ()
IF (${TD_OS_INFO} MATCHES "Alpine")
- SET(TD_APLHINE TRUE)
- MESSAGE(STATUS "The current OS is Alpine, append extra flags")
+ SET(TD_ALPINE TRUE)
+ MESSAGE(STATUS "The current OS is Alpine Linux, append extra flags")
+ ELSEIF (${TD_OS_INFO} MATCHES "Arch")
+ SET(TD_ARCHLINUX TRUE)
+ MESSAGE(STATUS "The current OS is Arch Linux")
+ ELSE ()
+ MESSAGE(STATUS "The distro is " ${TD_OS_INFO})
ENDIF()
ELSEIF (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
SET(TD_DARWIN TRUE)
@@ -155,7 +160,7 @@ ELSEIF (${OSTYPE} MATCHES "Linux")
MESSAGE(STATUS "input osType: Linux")
ELSEIF (${OSTYPE} MATCHES "Alpine")
MESSAGE(STATUS "input osType: Alpine")
- SET(TD_APLHINE TRUE)
+ SET(TD_ALPINE TRUE)
ELSE ()
MESSAGE(STATUS "The user specified osType is unknown: " ${OSTYPE})
ENDIF ()
diff --git a/cmake/version.inc b/cmake/version.inc
index 1d3b25e9237ef507811fa234dda4211acd6eb885..94ff39f5e655d89b16b57a4b8c8fbe275c82a49a 100755
--- a/cmake/version.inc
+++ b/cmake/version.inc
@@ -1,10 +1,10 @@
-CMAKE_MINIMUM_REQUIRED(VERSION 2.8...3.20)
+CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "2.3.0.0")
+ SET(TD_VER_NUMBER "2.3.1.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt
index 45828245e2d541114a2ae0a287e0c6acbd0d42be..773a791a2527712270f569d5c04aa7f8ef066e40 100644
--- a/deps/CMakeLists.txt
+++ b/deps/CMakeLists.txt
@@ -25,10 +25,36 @@ IF (TD_DARWIN AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()
+IF (TD_AVRO_SUPPORT)
+ MESSAGE("")
+ MESSAGE("${Green} ENABLE avro format support ${ColourReset}")
+ MESSAGE("")
+ include(ExternalProject)
+ ExternalProject_Add(
+ apache-avro
+ PREFIX "avro"
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c
+ BUILD_IN_SOURCE 1
+ PATCH_COMMAND
+ COMMAND git clean -f -d
+ COMMAND sed -i.bak -e "/TARGETS avroappend/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ COMMAND sed -i.bak -e "/TARGETS avrocat/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ COMMAND sed -i.bak -e "/TARGETS avromod/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ COMMAND sed -i.bak -e "/TARGETS avropipe/d" ${CMAKE_CURRENT_SOURCE_DIR}/avro/lang/c/src/CMakeLists.txt
+ CONFIGURE_COMMAND cmake -DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_BINARY_DIR}/build
+ )
+ELSE ()
+ MESSAGE("")
+ MESSAGE("${Yellow} NO avro format support ${ColourReset}")
+ MESSAGE("")
+ENDIF ()
+
IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
+ MESSAGE("")
+ MESSAGE("${Green} ENABLE jemalloc ${ColourReset}")
+ MESSAGE("")
MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
- include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
@@ -39,5 +65,5 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
ENDIF ()
IF (${TSZ_ENABLED} MATCHES "true")
- ADD_SUBDIRECTORY(TSZ)
-ENDIF()
\ No newline at end of file
+ ADD_SUBDIRECTORY(TSZ)
+ENDIF()
diff --git a/deps/TSZ b/deps/TSZ
deleted file mode 160000
index 0ca5b15a8eac40327dd737be52c926fa5675712c..0000000000000000000000000000000000000000
--- a/deps/TSZ
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 0ca5b15a8eac40327dd737be52c926fa5675712c
diff --git a/deps/avro b/deps/avro
new file mode 160000
index 0000000000000000000000000000000000000000..a1fce29d9675b4dd95dfee9db32cc505d0b2227c
--- /dev/null
+++ b/deps/avro
@@ -0,0 +1 @@
+Subproject commit a1fce29d9675b4dd95dfee9db32cc505d0b2227c
diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md
index 24654ed407ea121c627e0488888a455f9a858646..70a6b7c5281e1a96f8348ff3a3bb81892b80c93c 100644
--- a/documentation20/cn/00.index/docs.md
+++ b/documentation20/cn/00.index/docs.md
@@ -81,6 +81,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [Node.js Connector](/connector#nodejs):给node应用提供一个连接TDengine服务器的驱动
* [C# Connector](/connector#csharp):给C#应用提供一个连接TDengine服务器的驱动
* [Windows客户端](https://www.taosdata.com/blog/2019/07/26/514.html):自行编译windows客户端,Windows环境的各种连接器都需要它
+* [Rust Connector](/connector/rust): Rust语言下通过libtaos客户端或RESTful接口,连接TDengine服务器。
## [与其他工具的连接](/connections)
@@ -131,7 +132,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [TDengine写入性能测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html)
* [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html)
* [基于Electron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI)
-* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md))
+* [基于DataX的TDengine数据迁移工具](https://www.taosdata.com/blog/2021/10/26/3156.html)
## TDengine与其他数据库的对比测试
diff --git a/documentation20/cn/02.getting-started/02.taosdemo/docs.md b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
index fee6708d3a51fa71fed64e31ade72a8dac05b259..3f91dbb35130a2ff78e5ef23219b79433af33ce3 100644
--- a/documentation20/cn/02.getting-started/02.taosdemo/docs.md
+++ b/documentation20/cn/02.getting-started/02.taosdemo/docs.md
@@ -145,7 +145,7 @@ insert delay, avg: 8.31ms, max: 860.12ms, min: 2.00ms
```
$ taosdemo --help
--f, --file=FILE The meta file to the execution procedure.
+-f, --file=FILE The meta file to the execution procedure. Currently, we support standard UTF-8 (without BOM) encoded files only.
-u, --user=USER The user name to use when connecting to the server.
-p, --password The password to use when connecting to the server.
-c, --config-dir=CONFIG_DIR Configuration directory.
@@ -442,7 +442,7 @@ TDengine是涛思数据专为物联网、车联网、工业互联网、IT运维
taosdemo支持两种配置参数的模式,一种是命令行参数,一种是使用json格式的配置文件。
一、命令行参数
--f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。
+-f:指定taosdemo所需参数的meta文件。当使用该参数时,其他所有命令行参数都失效。可选项,缺省是NULL。目前仅支持不含 BOM(byte-order mark)的标准 UTF-8 编码文件。
-u: 用户名。可选项,缺省是“root“。
diff --git a/documentation20/cn/05.insert/docs.md b/documentation20/cn/05.insert/docs.md
index 9a0e9b388e639d5e6c6e5094682f07a223c01ada..24ac6c52e19e24697c8ad35fdaf822adbd614a0a 100644
--- a/documentation20/cn/05.insert/docs.md
+++ b/documentation20/cn/05.insert/docs.md
@@ -27,13 +27,18 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
- 对同一张表,如果新插入记录的时间戳已经存在,默认情形下(UPDATE=0)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用了 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。
- 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还早的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days为2,那么无法写入比当前时间还晚2天的数据。
-## Schemaless 写入
+## 无模式(Schemaless)写入
+**前言**
+
在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
+
目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless)章节。这里对 Schemaless 的数据表达格式进行了描述。
+
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,您也可以通过 SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
-在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 从 2.2.0.0 版本开始,提供 Schemaless 写入方式,可以免于预先创建超级表/数据子表,而是随着数据写入,自动创建与数据对应的存储结构。并且在必要时,Schemaless 将自动增加必要的数据列,保证用户写入的数据可以被正确存储。目前,TDengine 的 C/C++ Connector 提供支持 Schemaless 的操作接口,详情请参见 [Schemaless 方式写入接口](https://www.taosdata.com/cn/documentation/connector#schemaless) 章节。这里对 Schemaless 的数据表达格式进行描述。
+**无模式写入行协议**
+
TDengine 的无模式写入的行协议兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议、OpenTSDB 的 Json 格式协议。但是使用这三种协议的时候,需要在 API 中指定输入内容使用解析协议的标准。
-### Schemaless 数据行协议
+对于InfluxDB、OpenTSDB的标准写入协议请参考各自的文档。下面首先以 InfluxDB 的行协议为基础,介绍 TDengine 扩展的协议内容,允许用户采用更加精细的方式控制(超级表)模式。
-Schemaless 采用一个字符串来表达最终存储的一个数据行(可以向 Schemaless 写入 API 中一次传入多个字符串来实现多个数据行的批量写入),其格式约定如下:
+Schemaless 采用一个字符串来表达一个数据行(可以向写入 API 中一次传入多行字符串来实现多个数据行的批量写入),其格式约定如下:
```json
measurement,tag_set field_set timestamp
```
@@ -44,70 +49,123 @@ measurement,tag_set field_set timestamp
* field_set 将作为普通列数据,其格式形如 `=,=`,同样是使用英文逗号来分隔多个普通列的数据。它与 timestamp 之间使用一个半角空格来分隔。
* timestamp 即本行数据对应的主键时间戳。
-在 Schemaless 的数据行协议中,tag_set、field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说:
+tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要使用双引号(")。
+
在无模式写入数据行协议中,field_set 中的每个数据项都需要对自身的数据类型进行描述。具体来说:
* 如果两边有英文双引号,表示 BIANRY(32) 类型。例如 `"abc"`。
* 如果两边有英文双引号而且带有 L 前缀,表示 NCHAR(32) 类型。例如 `L"报错信息"`。
* 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)
* 数值类型将通过后缀来区分数据类型:
- - 没有后缀,为 FLOAT 类型;
- - 后缀为 f32,为 FLOAT 类型;
- - 后缀为 f64,为 DOUBLE 类型;
- - 后缀为 i8,表示为 TINYINT (INT8) 类型;
- - 后缀为 i16,表示为 SMALLINT (INT16) 类型;
- - 后缀为 i32,表示为 INT (INT32) 类型;
- - 后缀为 i64,表示为 BIGINT (INT64) 类型;
+
+| **序号** | **后缀** | **映射类型** | **大小(字节)** |
+| -- | ------- | ---------| ------ |
+| 1 | 无或f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8 | TinyInt | 1 |
+| 4 | i16 | SmallInt | 2 |
+| 5 | i32 | Int | 4 |
+| 6 | i64或i | Bigint | 8 |
* t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
+
例如如下数据行表示:向名为 st 的超级表下的 t1 标签为 "3"(NCHAR)、t2 标签为 "4"(NCHAR)、t3 标签为 "t3"(NCHAR)的数据子表,写入 c1 列为 3(BIGINT)、c2 列为 false(BOOL)、c3 列为 "passit"(BINARY)、c4 列为 4(DOUBLE)、主键时间戳为 1626006833639000000 的一行数据。
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
+```
+需要注意的是,如果描述数据类型后缀时使用了错误的大小写,或者为数据指定的数据类型有误,均可能引发报错提示而导致数据写入失败。
-timestamp 位置的时间戳通过后缀来声明时间精度,具体如下:
-* 不带任何后缀的长整数会被当作微秒来处理;
-* 当后缀为 s 时,表示秒时间戳;
-* 当后缀为 ms 时,表示毫秒时间戳;
-* 当后缀为 us 时,表示微秒时间戳;
-* 当后缀为 ns 时,表示纳秒时间戳;
-* 当时间戳为 0 时,表示采用客户端的当前时间(因此,同一批提交的数据中,时间戳 0 会被解释为同一个时间点,于是就有可能导致时间戳重复)。
+### 无模式写入的主要处理逻辑
-例如,如下 Schemaless 数据行表示:向名为 st 的超级表下的 t1 标签为 3(BIGINT 类型)、t2 标签为 4(DOUBLE 类型)、t3 标签为 "t3"(BINARY 类型)的数据子表,写入 c1 列为 3(BIGINT 类型)、c2 列为 false(BOOL 类型)、c3 列为 "passit"(NCHAR 类型)、c4 列为 4(DOUBLE 类型)、主键时间戳为 1626006833639000000(纳秒精度)的一行数据。
+无模式写入按照如下原则来处理行数据:
+1. 当 tag_set 中有 ID 字段时,该字段的值将作为子表的表名。
+2. 没有 ID 字段时,将使用如下规则来生成子表名:
+首先将measurement 的名称和标签的 key 和 value 组合成为如下的字符串
+```json
+"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
+```
+需要注意的是,这里的tag_key1, tag_key2并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
+排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
+
3. 如果解析行协议获得的超级表不存在,则会创建这个超级表。
+
4. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
+
5. 如果数据行中指定的标签列或普通列不存在,则在超级表中增加对应的标签列或普通列(只增不减)。
+
6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。
+
7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
+
8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。
+
9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
+
+**备注:**
+
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。
+
+**时间分辨率识别**
+
无模式写入过程中支持三个指定的模式,具体如下
+
+| **序号** | **值** | **说明** |
+| ---- | ------------------- | ------------ |
+| 1 | SML_LINE_PROTOCOL | InfluxDB行协议(Line Protocol) |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB文本行协议 |
+| 3 | SML_JSON_PROTOCOL | Json协议格式 |
+
+
在 SML_LINE_PROTOCOL 解析模式下,需要用户指定输入的时间戳的时间分辨率。可用的时间分辨率如下表所示:
+
+| **序号** | **时间分辨率定义** | **含义** |
+| ---- | ----------------------------- | --------- |
+| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | 未定义(无效) |
+| 2 | TSDB_SML_TIMESTAMP_HOURS | 小时 |
+| 3 | TSDB_SML_TIMESTAMP_MINUTES | 分钟 |
+| 4 | TSDB_SML_TIMESTAMP_SECONDS | 秒 |
+| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | 毫秒 |
+| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | 微秒 |
+| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | 纳秒 |
+
+在 SML_TELNET_PROTOCOL 和 SML_JSON_PROTOCOL 模式下,根据时间戳的长度来确定时间精度(与 OpenTSDB 标准操作方式相同),此时会忽略用户指定的时间分辨率。
+
+**数据模式变更处理**
+
本节将说明不同行数据写入情况下,对于数据模式的影响。
+
+在使用行协议写入一个明确的标识的字段类型的时候,后续更改该字段的类型定义,会出现明确的数据模式错误,即会触发写入 API 报告错误。如下所示,
```json
-st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000ns
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
```
+第一行的数据类型映射将 c4 列定义为 Double, 但是第二行的数据又通过数值后缀方式声明该列为 BigInt, 由此会触发无模式写入的解析错误。
-需要注意的是,如果描述数据类型后缀时使用了错误的大小写,或者为数据指定的数据类型有误,均可能引发报错提示而导致数据写入失败。
+如果列前面的行协议将数据列声明为了 binary, 后续的要求长度更长的binary长度,此时会触发超级表模式的变更。
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
+```
+第一行中行协议解析会声明 c5 列是一个 binary(4)的字段,第二次行数据写入会提取列 c5 仍然是 binary 列,但是其宽度为 6,此时需要将binary的宽度增加到能够容纳 新字符串的宽度。
+```json
+st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
+```
+第二行数据相对于第一行来说增加了一个列 c6,类型为binary(6)。那么此时会自动增加一个列 c6, 类型为 binary(6)。
-### Schemaless 的处理逻辑
+**写入完整性**
+
TDengine 提供数据写入的幂等性保证,即您可以反复调用 API 进行出错数据的写入操作。但是不提供多行数据写入的原子性保证。即在多行数据一批次写入过程中,会出现部分数据写入成功,部分数据写入失败的情况。
-Schemaless 按照如下原则来处理行数据:
-1. 当 tag_set 中有 ID 字段时,该字段的值将作为数据子表的表名。
-2. 没有 ID 字段时,将使用 `measurement + tag_value1 + tag_value2 + ...` 的 md5 值来作为子表名。
-3. 如果指定的超级表名不存在,则 Schemaless 会创建这个超级表。
-4. 如果指定的数据子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
-5. 如果数据行中指定的标签列或普通列不存在,则 Schemaless 会在超级表中增加对应的标签列或普通列(只增不减)。
-6. 如果超级表中存在一些标签列或普通列未在一个数据行中被指定取值,那么这些列的值在这一行中会被置为 NULL。
-7. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,那么 Schemaless 会增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
-8. 如果指定的数据子表已经存在,而且本次指定的标签列取值跟已保存的值不一样,那么最新的数据行中的值会覆盖旧的标签列取值。
-9. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
+**错误码**
+
如果是无模式写入过程中的数据本身错误,应用会得到 TSDB_CODE_TSC_LINE_SYNTAX_ERROR 错误信息,该错误信息表明错误发生在写入文本中。其他的错误码与原系统一致,可以通过 taos_errstr 获取具体的错误原因。
-**注意:**Schemaless 所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 16k 字节。这方面的具体限制约束请参见 [TAOS SQL 边界限制](https://www.taosdata.com/cn/documentation/taos-sql#limitation) 章节。
+**后续升级计划**
+
当前版本只提供了 C 版本的 API,后续将提供 其他高级语言的 API,例如 Java/Go/Python/C# 等。此外,在TDengine v2.3及后续版本中,您还可以通过 BLM v3 采用 REST 的方式直接写入无模式数据。
-关于 Schemaless 的字符串编码处理、时区设置等,均会沿用 TAOSC 客户端的设置。
## Prometheus 直接写入
[Prometheus](https://www.prometheus.io/)作为Cloud Native Computing Fundation毕业的项目,在性能监控以及K8S性能监控领域有着非常广泛的应用。TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需对Prometheus做简单配置,无需任何代码,就可将Prometheus采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用Bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
-### 从源代码编译 blm_prometheus
+### 从源代码编译 taosadapter_prometheus
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件:
- Linux操作系统的服务器
- 安装好Golang,1.14版本以上
- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器)
-Bailongma项目中有一个文件夹blm_prometheus,存放了prometheus的写入API程序。编译过程如下:
+Bailongma项目中有一个文件夹taosadapter_prometheus,存放了prometheus的写入API程序。编译过程如下:
```bash
-cd blm_prometheus
+cd taosadapter_prometheus
go build
```
-一切正常的情况下,就会在对应的目录下生成一个blm_prometheus的可执行程序。
+一切正常的情况下,就会在对应的目录下生成一个taosadapter_prometheus的可执行程序。
### 安装 Prometheus
@@ -118,23 +176,23 @@ go build
参考Prometheus的[配置文档](https://prometheus.io/docs/prometheus/latest/configuration/configuration/),在Prometheus的配置文件中的部分,增加以下配置:
```
- - url: "bailongma API服务提供的URL"(参考下面的blm_prometheus启动示例章节)
+ - url: "bailongma API服务提供的URL"(参考下面的taosadapter_prometheus启动示例章节)
```
启动Prometheus后,可以通过taos客户端查询确认数据是否成功写入。
-### 启动 blm_prometheus 程序
+### 启动 taosadapter_prometheus 程序
-blm_prometheus程序有以下选项,在启动blm_prometheus程序时可以通过设定这些选项来设定blm_prometheus的配置。
+taosadapter_prometheus程序有以下选项,在启动taosadapter_prometheus程序时可以通过设定这些选项来设定taosadapter_prometheus的配置。
```bash
--tdengine-name
如果TDengine安装在一台具备域名的服务器上,也可以通过配置TDengine的域名来访问TDengine。在K8S环境下,可以配置成TDengine所运行的service name。
--batch-size
-blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
+taosadapter_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
--dbname
-设置在TDengine中创建的数据库名称,blm_prometheus会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
+设置在TDengine中创建的数据库名称,taosadapter_prometheus会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
--dbuser
设置访问TDengine的用户名,缺省值是'root'。
@@ -143,16 +201,16 @@ blm_prometheus会将收到的prometheus的数据拼装成TDengine的写入请求
设置访问TDengine的密码,缺省值是'taosdata'。
--port
-blm_prometheus对prometheus提供服务的端口号。
+taosadapter_prometheus对prometheus提供服务的端口号。
```
### 启动示例
-通过以下命令启动一个blm_prometheus的API服务
+通过以下命令启动一个taosadapter_prometheus的API服务
```bash
-./blm_prometheus -port 8088
+./taosadapter_prometheus -port 8088
```
-假设blm_prometheus所在服务器的IP地址为"10.1.2.3",则在prometheus的配置文件中部分增加url为
+假设taosadapter_prometheus所在服务器的IP地址为"10.1.2.3",则在prometheus的配置文件中部分增加url为
```yaml
remote_write:
- url: "http://10.1.2.3:8088/receive"
@@ -177,16 +235,16 @@ prometheus产生的数据格式如下:
}
}
```
-其中,apiserver_request_latencies_bucket为prometheus采集的时序数据的名称,后面{}中的为该时序数据的标签。blm_prometheus会以时序数据的名称在TDengine中自动创建一个超级表,并将{}中的标签转换成TDengine的tag值,Timestamp作为时间戳,value作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
+其中,apiserver_request_latencies_bucket为prometheus采集的时序数据的名称,后面{}中的为该时序数据的标签。taosadapter_prometheus会以时序数据的名称在TDengine中自动创建一个超级表,并将{}中的标签转换成TDengine的tag值,Timestamp作为时间戳,value作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
```mysql
use prometheus;
select * from apiserver_request_latencies_bucket;
```
-## Telegraf 直接写入(通过 BLM v3)
+## Telegraf 直接写入(通过 taosadapter)
安装 Telegraf 请参考[官方文档](https://portal.influxdata.com/downloads/)。
-TDengine 新版本(2.3.0.0+)包含一个 BLM3 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。
+TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 Telegraf 的多种应用的数据写入。
配置方法,在 /etc/telegraf/telegraf.conf 增加如下文字,其中 database name 请填写希望在 TDengine 保存 Telegraf 数据的数据库名,TDengine server/cluster host、username和 password 填写 TDengine 实际值:
```
@@ -206,14 +264,14 @@ sudo systemctl start telegraf
```
即可在 TDengine 中查询 metrics 数据库中 Telegraf 写入的数据。
-BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。
+taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
-## collectd 直接写入(通过 BLM v3)
+## collectd 直接写入(通过 taosadapter)
安装 collectd,请参考[官方文档](https://collectd.org/download.shtml)。
-TDengine 新版本(2.3.0.0+)包含一个 BLM3 独立程序,负责接收包括 collectd 的多种应用的数据写入。
+TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 collectd 的多种应用的数据写入。
-在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 BLM3 配置的实际值:
+在 /etc/collectd/collectd.conf 文件中增加如下内容,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值:
```
LoadPlugin network
@@ -224,15 +282,15 @@ LoadPlugin network
```
sudo systemctl start collectd
```
-BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。
+taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
-## StatsD 直接写入(通过 BLM v3)
+## StatsD 直接写入(通过 taosadapter)
安装 StatsD
请参考[官方文档](https://github.com/statsd/statsd)。
-TDengine 新版本(2.3.0.0+)包含一个 BLM3 独立程序,负责接收包括 StatsD 的多种应用的数据写入。
+TDengine 新版本(2.3.0.0+)包含一个 taosadapter 独立程序,负责接收包括 StatsD 的多种应用的数据写入。
-在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 BLM3 配置的实际值:
+在 config.js 文件中增加如下内容后启动 StatsD,其中 host 和 port 请填写 TDengine 和 taosadapter 配置的实际值:
```
backends 部分添加 "./backends/repeater"
repeater 部分添加 { host:'', port: }
@@ -247,16 +305,16 @@ port: 8125
}
```
-BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。
+taosadapter 相关配置参数请参考 taosadapter --help 命令输出以及相关文档。
-## 使用 Bailongma 2.0 接入 Telegraf 数据写入
+## 使用 Bailongma 2.0 接入 Telegraf 数据写入
-*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 BLM v3,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。
+*注意:TDengine 新版本(2.3.0.0+)提供新版本 Bailongma ,命名为 taosadapter ,提供更简便的 Telegraf 数据写入以及其他更强大的功能,Bailongma v2 即之前版本将逐步不再维护。
[Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)是一流行的IT运维数据采集开源工具,TDengine提供一个小工具[Bailongma](https://github.com/taosdata/Bailongma),只需在Telegraf做简单配置,无需任何代码,就可将Telegraf采集的数据直接写入TDengine,并按规则在TDengine自动创建库和相关表项。博文[用Docker容器快速搭建一个Devops监控Demo](https://www.taosdata.com/blog/2020/02/03/1189.html)即是采用bailongma将Prometheus和Telegraf的数据写入TDengine中的示例,可以参考。
-### 从源代码编译 blm_telegraf
+### 从源代码编译 taosadapter_telegraf
用户需要从github下载[Bailongma](https://github.com/taosdata/Bailongma)的源码,使用Golang语言编译器编译生成可执行文件。在开始编译前,需要准备好以下条件:
@@ -264,14 +322,14 @@ BLM v3 相关配置参数请参考 blm3 --help 命令输出以及相关文档。
- 安装好Golang,1.10版本以上
- 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在Bailongma所在的Linux服务器(可以与TDengine在同一台服务器,或者不同服务器)
-Bailongma项目中有一个文件夹blm_telegraf,存放了Telegraf的写入API程序。编译过程如下:
+Bailongma项目中有一个文件夹taosadapter_telegraf,存放了Telegraf的写入API程序。编译过程如下:
```bash
-cd blm_telegraf
+cd taosadapter_telegraf
go build
```
-一切正常的情况下,就会在对应的目录下生成一个blm_telegraf的可执行程序。
+一切正常的情况下,就会在对应的目录下生成一个taosadapter_telegraf的可执行程序。
### 安装 Telegraf
@@ -294,19 +352,19 @@ go build
关于如何使用Telegraf采集数据以及更多有关使用Telegraf的信息,请参考Telegraf官方的[文档](https://docs.influxdata.com/telegraf/v1.11/)。
-### 启动 blm_telegraf 程序
+### 启动 taosadapter_telegraf 程序
-blm_telegraf程序有以下选项,在启动blm_telegraf程序时可以通过设定这些选项来设定blm_telegraf的配置。
+taosadapter_telegraf程序有以下选项,在启动taosadapter_telegraf程序时可以通过设定这些选项来设定taosadapter_telegraf的配置。
```bash
--host
TDengine服务端的IP地址,缺省值为空。
--batch-size
-blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
+taosadapter_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,这个参数控制一次发给TDengine的写入请求中携带的数据条数。
--dbname
-设置在TDengine中创建的数据库名称,blm_telegraf会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
+设置在TDengine中创建的数据库名称,taosadapter_telegraf会自动在TDengine中创建一个以dbname为名称的数据库,缺省值是prometheus。
--dbuser
设置访问TDengine的用户名,缺省值是'root'。
@@ -315,17 +373,17 @@ blm_telegraf会将收到的telegraf的数据拼装成TDengine的写入请求,
设置访问TDengine的密码,缺省值是'taosdata'。
--port
-blm_telegraf对telegraf提供服务的端口号。
+taosadapter_telegraf对telegraf提供服务的端口号。
```
### 启动示例
-通过以下命令启动一个blm_telegraf的API服务:
+通过以下命令启动一个taosadapter_telegraf的API服务:
```bash
-./blm_telegraf -host 127.0.0.1 -port 8089
+./taosadapter_telegraf -host 127.0.0.1 -port 8089
```
-假设blm_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项:
+假设taosadapter_telegraf所在服务器的IP地址为"10.1.2.3",则在telegraf的配置文件中, 在output plugins部分,增加[[outputs.http]]配置项:
```yaml
url = "http://10.1.2.3:8089/telegraf"
@@ -358,7 +416,7 @@ telegraf产生的数据格式如下:
}
```
-其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。blm_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
+其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。taosadapter_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。
```mysql
use telegraf;
diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md
index 110b902b2051a88e14eaa73627780e56be158928..cd924f052e03f1be728d8f578c13d808e9a5c9b3 100644
--- a/documentation20/cn/08.connector/01.java/docs.md
+++ b/documentation20/cn/08.connector/01.java/docs.md
@@ -334,7 +334,6 @@ JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错(
从 2.1.2.0 版本开始,TDengine 的 **JDBC-JNI** 实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。(注意:**JDBC-RESTful** 实现并不提供参数绑定这种使用方式。)
```java
-Statement stmt = conn.createStatement();
Random r = new Random();
// INSERT 语句中,VALUES 部分允许指定具体的数据列;如果采取自动建表,则 TAGS 部分需要设定全部 TAGS 列的参数值:
diff --git a/documentation20/cn/08.connector/02.rust/docs.md b/documentation20/cn/08.connector/02.rust/docs.md
new file mode 100644
index 0000000000000000000000000000000000000000..01d4087e3acf2eed2dbea207d6d48ff360b5aece
--- /dev/null
+++ b/documentation20/cn/08.connector/02.rust/docs.md
@@ -0,0 +1,110 @@
+# Rust 连接器
+
+ 
+
+> Rust 连接器仍然在快速开发中,版本API变动在所难免,在1.0 之前无法保证其向后兼容,请使用时注意版本及对应的文档。
+
+感谢 [@songtianyi](https://github.com/songtianyi) 对 [libtdengine](https://github.com/songtianyi/tdengine-rust-bindings) 的贡献,使Rust社区能够使用Rust 连接[TDengine]. [libtaos-rs] 项目旨在为Rust开发者提供官方支持,使用taosc接口及HTTP接口构建兼容API以便于用户切换接口方式。
+
+## 依赖
+
+- [Rust](https://www.rust-lang.org/learn/get-started)
+
+默认情况下,[libtaos-rs] 使用 C 接口连接数据库,所以您需要:
+
+- [TDengine] [客户端](https://www.taosdata.com/cn/getting-started/#%E9%80%9A%E8%BF%87%E5%AE%89%E8%A3%85%E5%8C%85%E5%AE%89%E8%A3%85)
+- `clang`: `bindgen` 使用 `libclangAST` 来生成对应的Rust绑定。
+
+## 特性列表
+
+- [x] C 接口的Rust绑定
+- [x] 使用 `rest` feature 来启用 RESTful API.
+- [x] [r2d2] 连接池支持(feature `r2d2`)
+- [ ] 迭代器接口
+- [ ] 流式计算接口
+- [ ] 订阅支持
+
+## 构建和测试
+
+```sh
+cargo build
+cargo test
+```
+
+测试使用默认用户名密码和本地连接。您可以根据具体情况设置环境变量:
+
+- `TEST_TAOS_IP`
+- `TEST_TAOS_PORT`
+- `TEST_TAOS_USER`
+- `TEST_TAOS_PASS`
+- `TEST_TAOS_DB`
+
+## 使用
+
+使用默认的taosc 连接方式,可以在 `Cargo.toml` 中直接添加 `libtaos` 依赖:
+
+```toml
+[dependencies]
+libtaos = "v0.3.8"
+```
+
+添加 feature `r2d2` 来启动连接池:
+
+```toml
+[dependencies]
+libtaos = { version = "*", features = ["r2d2"] }
+```
+
+对于RESTful接口,可使用 `rest` 特性来替代taosc,免去安装TDengine客户端。
+
+```toml
+[dependencies]
+libtaos = { version = "*", features = ["rest"] }
+```
+
+本项目中提供一个 [示例程序](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs) 如下:
+
+```rust
+// ...
+#[tokio::main]
+async fn main() -> Result<(), Error> {
+ init();
+ let taos = taos_connect()?;
+
+ assert_eq!(
+ taos.query("drop database if exists demo").await.is_ok(),
+ true
+ );
+ assert_eq!(taos.query("create database demo").await.is_ok(), true);
+ assert_eq!(taos.query("use demo").await.is_ok(), true);
+ assert_eq!(
+ taos.query("create table m1 (ts timestamp, speed int)")
+ .await
+ .is_ok(),
+ true
+ );
+
+ for i in 0..10i32 {
+ assert_eq!(
+ taos.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str())
+ .await
+ .is_ok(),
+ true
+ );
+ }
+ let rows = taos.query("select * from m1").await?;
+
+ println!("{}", rows.column_meta.into_iter().map(|col| col.name).join(","));
+ for row in rows.rows {
+ println!("{}", row.into_iter().join(","));
+ }
+ Ok(())
+}
+```
+
+您可以在 [bailongma-rs] - 一个 Rust 编写的 Prometheus 远程存储 API 适配器 - 看到如何在具体应用中使用 Rust 连接器。
+
+[libtaos-rs]: https://github.com/taosdata/libtaos-rs
+[TDengine]: https://github.com/taosdata/TDengine
+[bailongma-rs]: https://github.com/taosdata/bailongma-rs
+[r2d2]: https://crates.io/crates/r2d2
\ No newline at end of file
diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md
index 2295de6f9bba359d691af5e92c2bb8026beaf5a1..b4543111b22008467ba749018fa2c19321f4f18e 100644
--- a/documentation20/cn/08.connector/docs.md
+++ b/documentation20/cn/08.connector/docs.md
@@ -573,6 +573,14 @@ cd C:\TDengine\connector\python
python -m pip install .
```
+**PyPI**
+
+从2.1.1版本开始,用户可以从[PyPI](https://pypi.org/project/taospy/)安装:
+
+```sh
+pip install taospy
+```
+
* 如果机器上没有pip命令,用户可将src/connector/python下的taos文件夹拷贝到应用程序的目录使用。
对于windows 客户端,安装TDengine windows 客户端后,将C:\TDengine\driver\taos.dll拷贝到C:\windows\system32目录下即可。
@@ -608,6 +616,22 @@ python3 PythonChecker.py -host
### Python连接器的使用
+#### PEP-249 兼容API
+
+您可以像其他数据库一样,使用类似 [PEP-249](https://www.python.org/dev/peps/pep-0249/) 数据库API规范风格的API:
+
+```python
+import taos
+
+conn = taos.connect()
+cursor = conn.cursor()
+
+cursor.execute("show databases")
+results = cursor.fetchall()
+for row in results:
+ print(row)
+```
+
#### 代码示例
* 导入TDengine客户端模块
@@ -663,6 +687,44 @@ for data in c1:
print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
```
+* 从v2.1.0版本开始, 我们提供另外一种API:`connection.query`
+
+ ```python
+ import taos
+
+ conn = taos.connect()
+ conn.execute("create database if not exists pytest")
+
+ result = conn.query("show databases")
+ num_of_fields = result.field_count
+ for field in result.fields:
+ print(field)
+ for row in result:
+ print(row)
+ conn.execute("drop database pytest")
+ ```
+
+ `query` 方法会返回一个 `TaosResult` 类对象,并提供了以下有用的属性或方法:
+
+ 属性:
+
+ - `fields`: `TaosFields` 集合类,提供返回数据的列信息。
+ - `field_count`: 返回数据的列数.
+ - `affected_rows`: 插入数据的行数.
+ - `row_count`: 查询数据结果数.
+ - `precision`: 当前数据库的时间精度.
+
+ 方法:
+
+ - `fetch_all()`: 类似于 `cursor.fetchall()` 返回同样的集合数据
+ - `fetch_all_into_dict()`: v2.1.1 新添加的API,将上面的数据转换成字典类型返回
+ - `blocks_iter()` `rows_iter()`: 根据底层API提供的两种不同迭代器。
+ - `fetch_rows_a`: 异步API
+ - `errno`: 错误码
+ - `errstr`: 错误信息
+ - `close`: 关闭结果对象,一般不需要直接调用
+
+
* 创建订阅
```python
diff --git a/documentation20/cn/09.connections/docs.md b/documentation20/cn/09.connections/docs.md
index 799cfc14a300d3f4c9fcbf8537f04984ae8e1df4..bc3259365d0b658184318e994ffd31a9e4ffee90 100644
--- a/documentation20/cn/09.connections/docs.md
+++ b/documentation20/cn/09.connections/docs.md
@@ -3,7 +3,7 @@
## Grafana
-TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。
+TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/)快速集成搭建数据监测报警系统,整个过程无需任何代码开发,TDengine 中数据表中内容可以在仪表盘(DashBoard)上进行可视化展现。关于TDengine插件的使用您可以在[GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md)中了解更多。
### 安装Grafana
@@ -11,19 +11,24 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
### 配置Grafana
-TDengine 的 Grafana 插件在安装包的 /usr/local/taos/connector/grafanaplugin 目录下。
-
-以 CentOS 7.2 操作系统为例,将 grafanaplugin 目录拷贝到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
+TDengine 的 Grafana 插件请从 <https://github.com/taosdata/grafanaplugin/releases> 下载。
```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+GF_VERSION=3.1.1
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
```
-Grafana 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
+以 CentOS 7.2 操作系统为例,将插件包解压到 /var/lib/grafana/plugins 目录下,重新启动 grafana 即可。
+
+```bash
+sudo unzip tdengine-datasource-$GF_VERSION.zip -d /var/lib/grafana/plugins/
```
+
+Grafana 7.3+ / 8.x 版本会对插件进行签名检查,因此还需要在 grafana.ini 文件中增加如下行,才能正确使用插件:
+
+```ini
[plugins]
-enable_alpha = true
-allow_loading_unsigned_plugins = taosdata-tdengine-datasource
+allow_loading_unsigned_plugins = tdengine-datasource
```
### 使用 Grafana
@@ -62,7 +67,6 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
* ALIAS BY:可设置当前查询别名。
* GENERATE SQL: 点击该按钮会自动替换相应变量,并生成最终执行的语句。
-
按照默认提示查询当前 TDengine 部署所在服务器指定间隔系统内存平均使用量如下:

@@ -71,16 +75,15 @@ allow_loading_unsigned_plugins = taosdata-tdengine-datasource
#### 导入 Dashboard
-在 Grafana 插件目录 /usr/local/taos/connector/grafanaplugin/dashboard 下提供了一个 `tdengine-grafana.json` 可导入的 dashboard。
+我们提供一个 TDengine Dashboard 可以作为 TDengine 集群的监控可视化工具使用,见 [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146)。
-点击左侧 `Import` 按钮,并上传 `tdengine-grafana.json` 文件:
+点击左侧 `Import` 按钮,选择 **Grafana.com Dashboard**,将 id `15146` 填入并加载:

导入完成之后可看到如下效果:
-
-
+
## MATLAB
diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md
index cb3a87127323d32dc9654c5abbe7a1b5788e97c6..4ba496d575e0f680c2dbd2820d3dfc062c56cb1c 100644
--- a/documentation20/cn/12.taos-sql/docs.md
+++ b/documentation20/cn/12.taos-sql/docs.md
@@ -729,17 +729,17 @@ Query OK, 1 row(s) in set (0.001091s)
| **Operation** | **Note** | **Applicable Data Types** |
| ------------- | ------------------------ | ----------------------------------------- |
-| > | larger than | **`timestamp`** and all numeric types |
-| < | smaller than | **`timestamp`** and all numeric types |
-| >= | larger than or equal to | **`timestamp`** and all numeric types |
-| <= | smaller than or equal to | **`timestamp`** and all numeric types |
+| > | larger than | all types except bool |
+| < | smaller than | all types except bool |
+| >= | larger than or equal to | all types except bool |
+| <= | smaller than or equal to | all types except bool |
| = | equal to | all types |
| <> | not equal to | all types |
| is [not] null | is null or is not null | all types |
-| between and | within a certain range | **`timestamp`** and all numeric types |
+| between and | within a certain range | all types except bool |
| in | match any value in a set | all types except first column `timestamp` |
| like | match a wildcard string | **`binary`** **`nchar`** |
-| match/nmatch | filter regex | **regex** |
+| match/nmatch | filter regex | **`binary`** **`nchar`** |
1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。
2. like 算子使用通配符字符串进行匹配检查。
@@ -766,15 +766,10 @@ Query OK, 1 row(s) in set (0.001091s)
**使用限制**
- 只能针对表名(即 tbname 筛选)和标签的名称和binary类型标签值 进行正则表达式过滤,不支持针对普通列使用正则表达式过滤。
-
- 只能在 WHERE 子句中作为过滤条件存在。
+ 只能针对表名(即 tbname 筛选)、binary/nchar类型标签值进行正则表达式过滤,不支持普通列的过滤。
正则匹配字符串长度不能超过 128 字节。可以通过参数 *maxRegexStringLen* 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启才能生效。
- **嵌套查询支持**
-
- 可以在内层查询和外层查询中使用。
### JOIN 子句
@@ -1579,11 +1574,11 @@ SELECT function_list FROM stb_name
CREATE TABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT);
```
-针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数、以及随着时间变化的电流走势拟合直线。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下:
+针对智能电表采集的数据,以 10 分钟为一个阶段,计算过去 24 小时的电流数据的平均值、最大值、电流的中位数。如果没有计算值,用前一个非 NULL 值填充。使用的查询语句如下:
```mysql
-SELECT AVG(current), MAX(current), LEASTSQUARES(current, start_val, step_val), PERCENTILE(current, 50) FROM meters
- WHERE ts>=NOW-1d
+SELECT AVG(current), MAX(current), APERCENTILE(current, 50) FROM meters
+ WHERE ts>=NOW-1d and ts<=now
INTERVAL(10m)
FILL(PREV);
```
diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md
index 7483c972eebe26d0b010724ea699cd94906f382c..eb5f20e708bb4bb592a1ab2d535fcf261457b989 100644
--- a/documentation20/cn/13.faq/docs.md
+++ b/documentation20/cn/13.faq/docs.md
@@ -185,23 +185,23 @@ TDengine 中时间戳的时区总是由客户端进行处理,而与服务端
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。 |
| TCP | 6042 | Arbitrator 的服务端口。 | 随 Arbitrator 启动参数设置变化。 |
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 |
-| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 BLM3 启动参数设置变化(2.3.0.1+以上版本)。 |
-| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 BLM3 启动参数设置变化(2.3.0.1+以上版本)。 |
+| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 |
+| TCP | 6045 | 支持 collectd 数据接入端口。 | 随 taosadapter 启动参数设置变化(2.3.0.1+以上版本)。 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
| UDP | 6030-6034 | 客户端与服务端之间通讯。 | 随 serverPort 端口变化。 |
| UDP | 6035-6039 | 多节点集群的节点间通讯。 | 随 serverPort 端口变化。 |
## 20. go 语言编写组件编译失败怎样解决?
-新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 BLM3 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。
-使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 blm3 仓库代码后再编译。
+新版本 TDengine 2.3.0.0 包含一个使用 go 语言开发的 taosadapter 组件,取代之前内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD等)的数据接入功能。
+使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosadapter 仓库代码后再编译。
-目前编译方式默认自动编译 blm3。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
+目前编译方式默认自动编译 taosadapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决:
```sh
go env -w GO111MODULE=on
go env -w GOPROXY=https://goproxy.cn,direct
```
-如果希望继续使用之前的内置 httpd,可以关闭 blm3 编译,使用
+如果希望继续使用之前的内置 httpd,可以关闭 taosadapter 编译,使用
`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。
diff --git a/documentation20/cn/14.devops/01.telegraf/docs.md b/documentation20/cn/14.devops/01.telegraf/docs.md
index 4bdcd52d62f8c3a95bc91261b77242e5263a8f23..04765602dab18fbacf7d92d44ca324db660c0ac4 100644
--- a/documentation20/cn/14.devops/01.telegraf/docs.md
+++ b/documentation20/cn/14.devops/01.telegraf/docs.md
@@ -30,12 +30,14 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
## 数据链路设置
-### 复制 TDengine 插件到 grafana 插件目录
-```
-1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
-2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-4. sudo systemctl restart grafana-server.service
+### 下载 TDengine 插件到 grafana 插件目录
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
```
### 修改 /etc/telegraf/telegraf.conf
@@ -61,7 +63,7 @@ sudo systemctl start telegraf
使用 Web 浏览器访问 IP:3000 登录 Grafana 界面,系统初始用户名密码为 admin/admin。
点击左侧齿轮图标并选择 Plugins,应该可以找到 TDengine data source 插件图标。
-点击左侧加号图标并选择 Import,按照界面提示选择 /usr/local/taos/connector/grafanaplugin/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 文件。如果按照 Grafana 的机器上没有安装 TDengine,可以从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件再导入。之后可以看到如下界面的仪表盘:
+点击左侧加号图标并选择 Import,从 https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘:

diff --git a/documentation20/cn/14.devops/02.collectd/docs.md b/documentation20/cn/14.devops/02.collectd/docs.md
index 2a031d63e55ed7888332757170b781beae787ff7..a35772bb498d426a1f44a9e7eb0bea61b51f92a5 100644
--- a/documentation20/cn/14.devops/02.collectd/docs.md
+++ b/documentation20/cn/14.devops/02.collectd/docs.md
@@ -30,11 +30,13 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
## 数据链路设置
### 复制 TDengine 插件到 grafana 插件目录
-```
-1. sudo cp -r /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
-2. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
-3. echo -e "[plugins]\nallow_loading_unsigned_plugins = taosdata-tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
-4. sudo systemctl restart grafana-server.service
+
+```bash
+1. wget -c https://github.com/taosdata/grafanaplugin/releases/download/v3.1.1/tdengine-datasource-3.1.1.zip
+2. sudo unzip tdengine-datasource-3.1.1.zip -d /var/lib/grafana/plugins/
+3. sudo chown grafana:grafana -R /var/lib/grafana/plugins/tdengine
+4. echo -e "[plugins]\nallow_loading_unsigned_plugins = tdengine-datasource\n" | sudo tee -a /etc/grafana/grafana.ini
+5. sudo systemctl restart grafana-server.service
```
### 配置 collectd
@@ -62,13 +64,13 @@ repeater 部分添加 { host:'', port: Note that the rust connector is under active development and the APIs will changes a lot between versions. But we promise to ensure backward compatibility after version 1.0 .
+
+Thanks [@songtianyi](https://github.com/songtianyi) for [libtdengine](https://github.com/songtianyi/tdengine-rust-bindings) - a rust bindings project for [TDengine]. It's a new design for [TDengine] rust client based on C interface or the REST API. It will provide Rust-like APIs and all rust things (like async/stream/iterators and others).
+
+## Dependencies
+
+- [Rust](https://www.rust-lang.org/learn/get-started) of course.
+
+if you use the default features, it'll depend on:
+
+- [TDengine] Client library and headers.
+- clang because bindgen will requires the clang AST library.
+
+## Features
+
+In-design features:
+
+- [x] API for both C interface
+- [x] REST API support by feature `rest`.
+- [x] [r2d2] Pool support by feature `r2d2`
+- [ ] Iterators for fields fetching
+- [ ] Stream support
+- [ ] Subscribe support
+
+## Build and test
+
+```sh
+cargo build
+cargo test
+```
+
+`test` will use default TDengine user and password on localhost (TDengine default).
+
+Set variables if it's not default:
+
+- `TEST_TAOS_IP`
+- `TEST_TAOS_PORT`
+- `TEST_TAOS_USER`
+- `TEST_TAOS_PASS`
+- `TEST_TAOS_DB`
+
+## Usage
+
+For default C-based client API, set in Cargo.toml
+
+```toml
+[dependencies]
+libtaos = "v0.3.8"
+```
+
+For r2d2 support:
+
+```toml
+[dependencies]
+libtaos = { version = "*", features = ["r2d2"] }
+```
+
+For REST client:
+
+```toml
+[dependencies]
+libtaos = { version = "*", features = ["rest"] }
+```
+
+There's a [demo app](https://github.com/taosdata/libtaos-rs/blob/main/examples/demo.rs) in the examples directory, which looks like this:
+
+```rust
+// ...
+#[tokio::main]
+async fn main() -> Result<(), Error> {
+ init();
+ let taos = taos_connect()?;
+
+ assert_eq!(
+ taos.query("drop database if exists demo").await.is_ok(),
+ true
+ );
+ assert_eq!(taos.query("create database demo").await.is_ok(), true);
+ assert_eq!(taos.query("use demo").await.is_ok(), true);
+ assert_eq!(
+ taos.query("create table m1 (ts timestamp, speed int)")
+ .await
+ .is_ok(),
+ true
+ );
+
+ for i in 0..10i32 {
+ assert_eq!(
+ taos.query(format!("insert into m1 values (now+{}s, {})", i, i).as_str())
+ .await
+ .is_ok(),
+ true
+ );
+ }
+ let rows = taos.query("select * from m1").await?;
+
+ println!("{}", rows.column_meta.into_iter().map(|col| col.name).join(","));
+ for row in rows.rows {
+ println!("{}", row.into_iter().join(","));
+ }
+ Ok(())
+}
+```
+
+You can check out the experimental [bailongma-rs](https://github.com/taosdata/bailongma-rs) - a TDengine adapters for prometheus written with Rust - as a more productive code example.
+
+[libtaos-rs]: https://github.com/taosdata/libtaos-rs
+[TDengine]: https://github.com/taosdata/TDengine
+[bailongma-rs]: https://github.com/taosdata/bailongma-rs
+[r2d2]: https://crates.io/crates/r2d2
\ No newline at end of file
diff --git a/documentation20/en/08.connector/docs.md b/documentation20/en/08.connector/docs.md
index 57efd27dcc1b90775c7f2bfc6fbcbca57dc503ff..806bebd77738bd4251607237e3f88c589baa4741 100644
--- a/documentation20/en/08.connector/docs.md
+++ b/documentation20/en/08.connector/docs.md
@@ -419,19 +419,47 @@ or
`pip3 install src/connector/python/`
+You can install the `taospy` connector from [PyPI](https://pypi.org/project/taospy/):
+
+```sh
+pip install taospy
+```
+
#### Windows
-With Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\ windows\ system32" directory and enter the Windows cmd command line interface:
+With Windows TDengine client installed, copy the file "C:\TDengine\driver\taos.dll" to the "C:\Windows\system32" directory and enter the Windows *cmd* command line interface:
```cmd
cd C:\TDengine\connector\python
python -m pip install .
```
+Or install from PyPI:
+
+```cmd
+pip install taospy
+```
+
- If there is no `pip` command on the machine, the user can copy the taos folder under src/connector/python to the application directory for use. For Windows client, after installing the TDengine Windows client, copy C:\ TDengine\driver\taos.dll to the C:\ windows\ system32 directory.
### How to use
+#### PEP-249 Python Database API
+
+Definitely you can use the [PEP-249](https://www.python.org/dev/peps/pep-0249/) database API like other type of databases:
+
+```python
+import taos
+
+conn = taos.connect()
+cursor = conn.cursor()
+
+cursor.execute("show databases")
+results = cursor.fetchall()
+for row in results:
+ print(row)
+```
+
#### Code sample
- Import the TDengine client module
@@ -488,6 +516,44 @@ for data in c1:
print("ts=%s, temperature=%d, humidity=%f" %(data[0], data[1],data[2]))
```
+- Since v2.1.0, python connector provides a new API for query:
+
+```python
+import taos
+
+conn = taos.connect()
+conn.execute("create database if not exists pytest")
+
+result = conn.query("show databases")
+num_of_fields = result.field_count
+for field in result.fields:
+ print(field)
+for row in result:
+ print(row)
+conn.execute("drop database pytest")
+```
+
+The `query` method returns `TaosResult` class. It provides high level APIs for convenient use:
+
+Properties:
+
+- `fields`: the `TaosFields` object contains the column metadata, given the collection of each column field metadata by iterator.
+- `field_count`: column number of result.
+- `affected_rows`: the rows completed for insert.
+- `row_count`: the rows number for select.
+- `precision`: the result precision.
+
+Functions:
+
+- `fetch_all()`: get all data as tuple array.
+- `fetch_all_into_dict()`: get all data as dict array, added since v2.1.1
+- `blocks_iter()`: provides iterator by C `taos_fetch_blocks` API
+- `rows_iter()`: provides iterator by C `taos_fetch_row` API
+- `fetch_rows_a`: fetch rows by async API in taosc.
+- `errno`: error code if failed.
+- `errstr`: error string if failed.
+- `close`: close result, you do not need to call it directly, result will auto closed out of scope.
+
- Create subscription
```python
@@ -510,6 +576,7 @@ for d in data:
sub.close()
```
+
- Close connection
```python
diff --git a/documentation20/en/09.connections/docs.md b/documentation20/en/09.connections/docs.md
index f1bbf0ff639719c7609f4a04685adf9c16a4e623..b56458d351d23a2b61f88cfdf7dc64dc8043a295 100644
--- a/documentation20/en/09.connections/docs.md
+++ b/documentation20/en/09.connections/docs.md
@@ -12,12 +12,17 @@ https://grafana.com/grafana/download.
### Configure Grafana
-TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory.
+Download the grafana plugin from <https://github.com/taosdata/grafanaplugin/releases>.
+
+```bash
+GF_VERSION=3.1.1
+wget https://github.com/taosdata/grafanaplugin/releases/download/v$GF_VERSION/tdengine-datasource-$GF_VERSION.zip
+```
Taking Centos 7.2 as an example, just copy grafanaplugin directory to /var/lib/grafana/plugins directory and restart Grafana.
```bash
-sudo cp -rf /usr/local/taos/connector/grafanaplugin /var/lib/grafana/plugins/tdengine
+sudo unzip tdengine-datasource-$GF_VERSION.zip /var/lib/grafana/plugins/
```
### Use Grafana
@@ -64,15 +69,15 @@ According to the default prompt, query the average system memory usage at the sp
#### Import Dashboard
-A `tdengine-grafana.json` importable dashboard is provided under the Grafana plug-in directory `/usr/local/taos/connector/grafanaplugin/dashboard`.
+We provide an example dashboard [Grafana Dashboard 15146](https://grafana.com/grafana/dashboards/15146).
-Click the `Import` button on the left panel and upload the `tdengine-grafana.json` file:
+Click the `Import` button on the left panel and load the grafana id:

You can see as follows after Dashboard imported.
-
+
## MATLAB
diff --git a/documentation20/en/images/connections/dashboard-15146.png b/documentation20/en/images/connections/dashboard-15146.png
new file mode 100644
index 0000000000000000000000000000000000000000..3eb240ad8ad648953e32f27e674e2a9171ed9af8
Binary files /dev/null and b/documentation20/en/images/connections/dashboard-15146.png differ
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index 48f0bee6b34496603d67f74938857d7bb94627f2..e42212ff0f55420dfa5f23638a69439be795e43a 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -203,6 +203,9 @@ keepColumnName 1
# database name must be specified in restful interface if the following parameter is set, off by default
# httpDbNameMandatory 1
+# http keep alive, default is 30 seconds
+# httpKeepAlive 30000
+
# The following parameter is used to limit the maximum number of lines in log files.
# max number of lines per log filters
# numOfLogLines 10000000
diff --git a/packaging/check_package.sh b/packaging/check_package.sh
index edc98da65e5574b91efbce16f4df0fd042b18c13..0870e8c8eccc1a745ae5b081e2726ed8d809cf2b 100755
--- a/packaging/check_package.sh
+++ b/packaging/check_package.sh
@@ -128,12 +128,12 @@ function check_link() {
function check_main_path() {
#check install main dir and all sub dir
main_dir=("" "cfg" "bin" "connector" "driver" "examples" "include" "init.d")
- for i in ${main_dir[@]};do
+ for i in "${main_dir[@]}";do
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
- for i in ${nginx_main_dir[@]};do
+ for i in "${nginx_main_dir[@]}";do
check_file ${nginx_dir} $i
done
fi
@@ -142,12 +142,12 @@ function check_main_path() {
function check_bin_path() {
# check install bin dir and all sub dir
- bin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
- for i in ${bin_dir[@]};do
+ bin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "remove.sh" "tarbitrator" "set_core.sh")
+ for i in "${bin_dir[@]}";do
check_file ${sbin_dir} $i
done
- lbin_dir=("taos" "taosd" "blm3" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
- for i in ${lbin_dir[@]};do
+ lbin_dir=("taos" "taosd" "taosadapter" "taosdemo" "taosdump" "rmtaos" "tarbitrator" "set_core")
+ for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
if [ "$verMode" == "cluster" ]; then
@@ -171,16 +171,17 @@ function check_lib_path() {
function check_header_path() {
# check all header
header_dir=("taos.h" "taoserror.h")
- for i in ${header_dir[@]};do
+ for i in "${header_dir[@]}";do
check_link ${inc_link_dir}/$i
done
echo -e "Check bin path:\033[32mOK\033[0m!"
}
-function check_blm3_config_dir() {
+function check_taosadapter_config_dir() {
# check all config
- check_file ${cfg_install_dir} blm3.toml
- check_file ${install_main_dir}/cfg blm.toml.org
+ check_file ${cfg_install_dir} taosadapter.toml
+ check_file ${cfg_install_dir} taosadapter.service
+ check_file ${install_main_dir}/cfg taosadapter.toml.org
echo -e "Check conf path:\033[32mOK\033[0m!"
}
@@ -221,7 +222,7 @@ function test_TDengine() {
check_lib_path
check_header_path
check_config_dir
- check_blm3_config_dir
+ check_taosadapter_config_dir
check_log_path
check_data_path
result=`taos -s 'create database test ;create table test.tt(ts timestamp ,i int);insert into test.tt values(now,11);select * from test.tt' 2>&1 ||:`
diff --git a/packaging/deb/DEBIAN/control b/packaging/deb/DEBIAN/control
index c01640d7e9adb4f7f8d6eb29f06008480dc8eee4..fd3f81ba082d11f6ff3979382a63597b5806fa1f 100644
--- a/packaging/deb/DEBIAN/control
+++ b/packaging/deb/DEBIAN/control
@@ -11,4 +11,3 @@ Maintainer: support@taosdata.com
Provides: taosdata
Homepage: http://taosdata.com
Description: Big Data Platform Designed and Optimized for IoT.
-
diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst
index 55218b471669887bd0d4066bb9ef91bf1f195031..4b8b72e9abd9e12d9f669cf5658be2468ebab40b 100644
--- a/packaging/deb/DEBIAN/preinst
+++ b/packaging/deb/DEBIAN/preinst
@@ -28,8 +28,12 @@ if [ -f "${install_main_dir}/taos.cfg" ]; then
${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || :
fi
-if [ -f "${install_main_dir}/blm.toml" ]; then
- ${csudo} rm -f ${install_main_dir}/cfg/blm.toml || :
+if [ -f "${install_main_dir}/taosadapter.toml" ]; then
+ ${csudo} rm -f ${install_main_dir}/cfg/taosadapter.toml || :
+fi
+
+if [ -f "${install_main_dir}/taosadapter.service" ]; then
+ ${csudo} rm -f ${install_main_dir}/cfg/taosadapter.service || :
fi
# there can not libtaos.so*, otherwise ln -s error
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index e2043ba54cef0db4f4fd729f2c2285c342b6b109..235834a747e82886eef6c4540877307aa4dd3996 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -25,7 +25,7 @@ else
# Remove all links
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/blm3 || :
+ ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 2c18cec497c0a741c96f13afb06794e26e8eaf1c..f753668b3b1a83d15c126ae6b0d94c06e97c80aa 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -44,8 +44,11 @@ mkdir -p ${pkg_dir}${install_home_path}/init.d
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
-if [ -f "${compile_dir}/test/cfg/blm.toml" ]; then
- cp ${compile_dir}/test/cfg/blm.toml ${pkg_dir}${install_home_path}/cfg
+if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
+ cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg
+fi
+if [ -f "${compile_dir}/test/cfg/taosadapter.service" ]; then
+ cp ${compile_dir}/test/cfg/taosadapter.service ${pkg_dir}${install_home_path}/cfg ||:
fi
cp ${compile_dir}/../packaging/deb/taosd ${pkg_dir}${install_home_path}/init.d
@@ -59,8 +62,8 @@ cp ${compile_dir}/build/bin/taosdemo ${pkg_dir}${install_home_pat
cp ${compile_dir}/build/bin/taosdump ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin
-if [ -f "${compile_dir}/build/bin/blm3" ]; then
- cp ${compile_dir}/build/bin/blm3 ${pkg_dir}${install_home_path}/bin ||:
+if [ -f "${compile_dir}/build/bin/taosadapter" ]; then
+ cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||:
fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
@@ -68,19 +71,24 @@ cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_pat
cp ${compile_dir}/../src/inc/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../src/inc/taoserror.h ${pkg_dir}${install_home_path}/include
cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_path}/examples
-if [ -d "${top_dir}/src/connector/grafanaplugin/dist" ]; then
- cp -r ${top_dir}/src/connector/grafanaplugin/dist ${pkg_dir}${install_home_path}/connector/grafanaplugin
-else
- echo "grafanaplugin bundled directory not found!"
- exit 1
-fi
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*.* ${pkg_dir}${install_home_path}/connector ||:
+install_user_local_path="/usr/local"
+
+if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
+ mkdir -p ${pkg_dir}${install_user_local_path}/lib
+ cp ${compile_dir}/build/lib/libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/
+ ln -sf libavro.so.23.0.0 ${pkg_dir}${install_user_local_path}/lib/libavro.so.23
+ ln -sf libavro.so.23 ${pkg_dir}${install_user_local_path}/lib/libavro.so
+fi
+if [ -f ${compile_dir}/build/lib/libavro.a ]; then
+ cp ${compile_dir}/build/lib/libavro.a ${pkg_dir}${install_user_local_path}/lib/
+fi
+
if [ -f ${compile_dir}/build/bin/jemalloc-config ]; then
- install_user_local_path="/usr/local"
mkdir -p ${pkg_dir}${install_user_local_path}/{bin,lib,lib/pkgconfig,include/jemalloc,share/doc/jemalloc,share/man/man3}
cp ${compile_dir}/build/bin/jemalloc-config ${pkg_dir}${install_user_local_path}/bin/
if [ -f ${compile_dir}/build/bin/jemalloc.sh ]; then
@@ -120,6 +128,10 @@ chmod 755 ${pkg_dir}/DEBIAN/*
debver="Version: "$tdengine_ver
sed -i "2c$debver" ${pkg_dir}/DEBIAN/control
+if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
+ sed -i.bak "s/#Depends: no/Depends: libjansson4, libsnappy1v5/g" ${pkg_dir}/DEBIAN/control
+fi
+
#get taos version, then set deb name
@@ -151,4 +163,3 @@ cp ${pkg_dir}/*.deb ${output_dir}
# clean temp dir
rm -rf ${pkg_dir}
-
diff --git a/packaging/deb/taosd b/packaging/deb/taosd
index a14e61ac8cfb67b970ee89a2fd4cda9d7937b23f..5002607da20b621ca69a8a2a25e713879d0308af 100644
--- a/packaging/deb/taosd
+++ b/packaging/deb/taosd
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash
#
# Modified from original source: Elastic Search
# https://github.com/elasticsearch/elasticsearch
@@ -25,7 +25,7 @@ GROUP="root"
DAEMON="/usr/local/taos/bin/taosd"
DAEMON_OPTS=""
-HTTPD_NAME="blm3"
+HTTPD_NAME="taosadapter"
DAEMON_HTTPD_NAME=$HTTPD_NAME
DAEMON_HTTPD="/usr/local/taos/bin/$HTTPD_NAME"
diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh
index e4d3cda7f29fea96cabfe48f5b10ab668a085ea8..9f60b840d68577b751314e7ddecc9da98c20f8d6 100755
--- a/packaging/docker/dockerManifest.sh
+++ b/packaging/docker/dockerManifest.sh
@@ -45,24 +45,32 @@ echo "version=${version}"
#docker manifest rm tdengine/tdengine:${version}
if [ "$verType" == "beta" ]; then
docker manifest inspect tdengine/tdengine-beta:latest
+ docker manifest inspect tdengine/tdengine-beta:${version}
+ docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
+ sleep 30
+ docker manifest rm tdengine/tdengine-beta:${version}
docker manifest rm tdengine/tdengine-beta:latest
docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
- docker manifest push tdengine/tdengine-beta:latest
docker manifest push tdengine/tdengine-beta:${version}
-
+ docker manifest push tdengine/tdengine-beta:latest
elif [ "$verType" == "stable" ]; then
docker manifest inspect tdengine/tdengine:latest
+ docker manifest inspect tdengine/tdengine:${version}
+ docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
+ sleep 30
docker manifest rm tdengine/tdengine:latest
+ docker manifest rm tdengine/tdengine:${version}
+ docker manifest inspect tdengine/tdengine:latest
+ docker manifest inspect tdengine/tdengine:${version}
docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
docker login -u tdengine -p ${passWord} #replace the docker registry username and password
- docker manifest push tdengine/tdengine:latest
docker manifest push tdengine/tdengine:${version}
-
+ docker manifest push tdengine/tdengine:latest
else
echo "unknown verType, neither stable nor beta"
exit 1
diff --git a/packaging/release.sh b/packaging/release.sh
index 705103a87a35a73b2a91079707785279416644cd..b9fe25ec08e8dcd1170867fa20f4a4fe5a1ef2d1 100755
--- a/packaging/release.sh
+++ b/packaging/release.sh
@@ -151,7 +151,7 @@ function vercomp () {
}
# 1. check version information
-if (( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]]); then
+if ( ( ! is_valid_version $verNumber ) || ( ! is_valid_version $verNumberComp ) || [[ "$(vercomp $verNumber $verNumberComp)" == '2' ]] ); then
echo "please enter correct version"
exit 0
fi
@@ -213,7 +213,7 @@ else
exit 1
fi
-make -j8
+make -j8 && ${csudo} make install
cd ${curr_dir}
diff --git a/packaging/rpm/makerpm.sh b/packaging/rpm/makerpm.sh
index 4cc7daf1a4cd15d06db084faf23dd4fcb15a955d..42ceeb791b6154f7d22a477bf3b3c3b8c726869c 100755
--- a/packaging/rpm/makerpm.sh
+++ b/packaging/rpm/makerpm.sh
@@ -32,20 +32,20 @@ if command -v sudo > /dev/null; then
fi
function cp_rpm_package() {
-local cur_dir
-cd $1
-cur_dir=$(pwd)
-
-for dirlist in $(ls ${cur_dir}); do
- if test -d ${dirlist}; then
- cd ${dirlist}
- cp_rpm_package ${cur_dir}/${dirlist}
- cd ..
- fi
- if test -e ${dirlist}; then
- cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
- fi
-done
+ local cur_dir
+ cd $1
+ cur_dir=$(pwd)
+
+ for dirlist in "$(ls ${cur_dir})"; do
+ if test -d ${dirlist}; then
+ cd ${dirlist}
+ cp_rpm_package ${cur_dir}/${dirlist}
+ cd ..
+ fi
+ if test -e ${dirlist}; then
+ cp ${cur_dir}/${dirlist} ${output_dir}/TDengine-${tdengine_ver}.rpm
+ fi
+ done
}
if [ -d ${pkg_dir} ]; then
@@ -56,6 +56,10 @@ cd ${pkg_dir}
${csudo} mkdir -p BUILD BUILDROOT RPMS SOURCES SPECS SRPMS
+if [ -f ${compile_dir}/build/lib/libavro.so.23.0.0 ]; then
+ sed -i.bak 's/#Requires:/Requires: jansson snappy/g' ${spec_file}
+fi
+
${csudo} rpmbuild --define="_version ${tdengine_ver}" --define="_topdir ${pkg_dir}" --define="_compiledir ${compile_dir}" -bb ${spec_file}
# copy rpm package to output_dir, and modify package name, then clean temp dir
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 19fe23d194be2266bcb68034e3c4fd90d9824f3d..f7b8462dbedc74a270a8560bb51a853e292cff27 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -54,8 +54,11 @@ mkdir -p %{buildroot}%{homepath}/init.d
mkdir -p %{buildroot}%{homepath}/script
cp %{_compiledir}/../packaging/cfg/taos.cfg %{buildroot}%{homepath}/cfg
-if [ -f %{_compiledir}/test/cfg/blm.toml ]; then
- cp %{_compiledir}/test/cfg/blm.toml %{buildroot}%{homepath}/cfg
+if [ -f %{_compiledir}/test/cfg/taosadapter.toml ]; then
+ cp %{_compiledir}/test/cfg/taosadapter.toml %{buildroot}%{homepath}/cfg
+fi
+if [ -f %{_compiledir}/test/cfg/taosadapter.service ]; then
+ cp %{_compiledir}/test/cfg/taosadapter.service %{buildroot}%{homepath}/cfg
fi
cp %{_compiledir}/../packaging/rpm/taosd %{buildroot}%{homepath}/init.d
cp %{_compiledir}/../packaging/tools/post.sh %{buildroot}%{homepath}/script
@@ -65,26 +68,28 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin
cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin
-if [ -f %{_compiledir}/build/bin/blm3 ]; then
- cp %{_compiledir}/build/bin/blm3 %{buildroot}%{homepath}/bin ||:
+if [ -f %{_compiledir}/build/bin/taosadapter ]; then
+ cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||:
fi
cp %{_compiledir}/build/bin/taosdemo %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/bin/taosdump %{buildroot}%{homepath}/bin
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
cp %{_compiledir}/../src/inc/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/include
-if [ -d %{_compiledir}/../src/connector/grafanaplugin/dist ]; then
- cp -r %{_compiledir}/../src/connector/grafanaplugin/dist %{buildroot}%{homepath}/connector/grafanaplugin
-else
- echo grafanaplugin bundled directory not found!
- exit 1
-fi
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*.* %{buildroot}%{homepath}/connector ||:
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples
+if [ -f %{_compiledir}/build/lib/libavro.so.23.0.0 ]; then
+ cp %{_compiledir}/build/lib/libavro.so.23.0.0 %{buildroot}%{homepath}/driver
+ ln -sf libavro.so.23.0.0 %{buildroot}%{homepath}/driver/libavro.so.23
+ ln -sf libavro.so.23 %{buildroot}%{homepath}/driver/libavro.so
+fi
+if [ -f %{_compiledir}/build/lib/libavro.a ]; then
+ cp %{_compiledir}/build/lib/libavro.a %{buildroot}%{homepath}/driver
+fi
if [ -f %{_compiledir}/build/bin/jemalloc-config ]; then
mkdir -p %{buildroot}%{userlocalpath}/bin
@@ -151,14 +156,14 @@ if pidof taosd &> /dev/null; then
echo "Stop taosd service success!"
sleep 1
fi
-# if taos.cfg already softlink, remove it
+# if taos.cfg already exist, remove it
if [ -f %{cfg_install_dir}/taos.cfg ]; then
- ${csudo} rm -f %{homepath}/cfg/taos.cfg || :
+ ${csudo} rm -f %{cfg_install_dir}/cfg/taos.cfg || :
fi
-# if blm.toml already softlink, remove it
-if [ -f %{cfg_install_dir}/blm.toml ]; then
- ${csudo} rm -f %{homepath}/cfg/blm.toml || :
+# if taosadapter.toml already exist, remove it
+if [ -f %{cfg_install_dir}/taosadapter.toml ]; then
+ ${csudo} rm -f %{cfg_install_dir}/cfg/taosadapter.toml || :
fi
# there must not be any libtaos.so* files left, otherwise ln -s errors
@@ -199,7 +204,7 @@ if [ $1 -eq 0 ];then
# Remove all links
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/blm3 || :
+ ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${cfg_link_dir}/* || :
diff --git a/packaging/tools/check_os.sh b/packaging/tools/check_os.sh
index 92522f7b82e166c1d6ec365619869ad68969155c..cc8c6e0e9366232deb9013db62b29afebd179135 100755
--- a/packaging/tools/check_os.sh
+++ b/packaging/tools/check_os.sh
@@ -1,4 +1,4 @@
-# /bin/bash
+#!/bin/bash
#
CSI=$(echo -e "\033[")
CRED="${CSI}1;31m"
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 2d3ed2e0f8f97c4604471659415a691d1b704a60..61fcd3e51982dab6a72245fe0ffb9de5ac51a664 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -185,7 +185,7 @@ function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/blm3 || :
+ ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
@@ -197,7 +197,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
- [ -x ${install_main_dir}/bin/blm3 ] && ${csudo} ln -s ${install_main_dir}/bin/blm3 ${bin_link_dir}/blm3 || :
+ [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/rmtaos || :
@@ -303,7 +303,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -358,7 +358,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
@@ -447,18 +447,18 @@ function local_fqdn_check() {
fi
}
-function install_blm3_config() {
- if [ ! -f "${cfg_install_dir}/blm.toml" ]; then
+function install_taosadapter_config() {
+ if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${script_dir}/cfg/blm.toml ] && ${csudo} cp ${script_dir}/cfg/blm.toml ${cfg_install_dir}
- [ -f ${cfg_install_dir}/blm.toml ] && ${csudo} chmod 644 ${cfg_install_dir}/blm.toml
+ [ -f ${script_dir}/cfg/taosadapter.toml ] && ${csudo} cp ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir}
+ [ -f ${cfg_install_dir}/taosadapter.toml ] && ${csudo} chmod 644 ${cfg_install_dir}/taosadapter.toml
fi
- [ -f ${script_dir}/cfg/blm.toml ] &&
- ${csudo} cp -f ${script_dir}/cfg/blm.toml ${install_main_dir}/cfg/blm.toml.org
+ [ -f ${script_dir}/cfg/taosadapter.toml ] &&
+ ${csudo} cp -f ${script_dir}/cfg/taosadapter.toml ${cfg_install_dir}/taosadapter.toml.new
- [ -f ${cfg_install_dir}/blm.toml ] &&
- ${csudo} ln -s ${cfg_install_dir}/blm.toml ${install_main_dir}/cfg/blm.toml
+ [ -f ${cfg_install_dir}/taosadapter.toml ] &&
+ ${csudo} ln -s ${cfg_install_dir}/taosadapter.toml ${install_main_dir}/cfg/taosadapter.toml
[ ! -z $1 ] && return 0 || : # only install client
@@ -473,7 +473,7 @@ function install_config() {
${csudo} chmod 644 ${cfg_install_dir}/*
fi
- ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
+ ${csudo} cp -f ${script_dir}/cfg/taos.cfg ${cfg_install_dir}/taos.cfg.new
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg
[ ! -z $1 ] && return 0 || : # only install client
@@ -679,8 +679,8 @@ function install_service_on_systemd() {
taosd_service_config="${service_config_dir}/taosd.service"
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
- ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
- ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
@@ -756,6 +756,11 @@ function install_service_on_systemd() {
fi
}
+function install_taosadapter_service() {
+ [ -f ${script_dir}/cfg/taosadapter.service ] &&\
+ ${csudo} cp ${script_dir}/cfg/taosadapter.service ${service_config_dir}/
+}
+
function install_service() {
if ((${service_mod}==0)); then
install_service_on_systemd
@@ -878,8 +883,9 @@ function update_TDengine() {
if [ -z $1 ]; then
install_bin
install_service
+ install_taosadapter_service
install_config
- install_blm3_config
+ install_taosadapter_config
openresty_work=false
if [ "$verMode" == "cluster" ]; then
@@ -959,6 +965,7 @@ function install_TDengine() {
# For installing new
install_bin
install_service
+ install_taosadapter_service
openresty_work=false
if [ "$verMode" == "cluster" ]; then
diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh
index 05eb09d8f3a8b5237c36714e964530b877e332de..0e0ee7ba31f4715b2c5585dd040727d604aa90b1 100755
--- a/packaging/tools/install_power.sh
+++ b/packaging/tools/install_power.sh
@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/install_pro.sh b/packaging/tools/install_pro.sh
index 527f9a231e5a97fa086ef655cd420abc61677fcf..e5675b858066148df07508ad2438b0f00d7ce7bf 100755
--- a/packaging/tools/install_pro.sh
+++ b/packaging/tools/install_pro.sh
@@ -278,7 +278,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -305,7 +305,7 @@ function set_hostname() {
echo "set hostname fail!"
return
fi
-
+
#ubuntu/centos /etc/hostname
if [[ -e /etc/hostname ]]; then
${csudo} echo $newHostname > /etc/hostname ||:
@@ -330,7 +330,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/install_tq.sh b/packaging/tools/install_tq.sh
index 52e08cb6b0d00b25686b87e2f066401e0388d4ce..ef5fb8c05a4a98a55918ee217125bd0f0a09b955 100755
--- a/packaging/tools/install_tq.sh
+++ b/packaging/tools/install_tq.sh
@@ -287,7 +287,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -342,7 +342,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 7fbdbab1c798af572fc67cf79f27812ea64d3bae..8309fa516c4ffdcd9e5a17056304427543dad0a9 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -114,8 +114,8 @@ if [ "$osType" != "Darwin" ]; then
fi
fi
-function kill_blm3() {
- pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}')
+function kill_taosadapter() {
+ pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
@@ -156,7 +156,7 @@ function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/blm3 || :
+ ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
@@ -176,7 +176,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || :
[ -x ${install_main_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || :
- [ -x ${install_main_dir}/bin/blm3 ] && ${csudo} ln -s ${install_main_dir}/bin/blm3 ${bin_link_dir}/blm3 || :
+ [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo} ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
@@ -191,7 +191,7 @@ function install_bin() {
#Make link
[ -x ${install_main_dir}/bin/taos ] || [ -x ${install_main_2_dir}/bin/taos ] && ${csudo} ln -s ${install_main_dir}/bin/taos ${bin_link_dir}/taos || ${csudo} ln -s ${install_main_2_dir}/bin/taos || :
[ -x ${install_main_dir}/bin/taosd ] || [ -x ${install_main_2_dir}/bin/taosd ] && ${csudo} ln -s ${install_main_dir}/bin/taosd ${bin_link_dir}/taosd || ${csudo} ln -s ${install_main_2_dir}/bin/taosd || :
- [ -x ${install_main_dir}/bin/blm3 ] || [ -x ${install_main_2_dir}/bin/blm3 ] && ${csudo} ln -s ${install_main_dir}/bin/blm3 ${bin_link_dir}/blm3 || ${csudo} ln -s ${install_main_2_dir}/bin/blm3 || :
+ [ -x ${install_main_dir}/bin/taosadapter ] || [ -x ${install_main_2_dir}/bin/taosadapter ] && ${csudo} ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter || ${csudo} ln -s ${install_main_2_dir}/bin/taosadapter || :
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo} ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo} ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
fi
@@ -212,7 +212,8 @@ function install_jemalloc() {
fi
if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc
- /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h /usr/local/include/jemalloc
+ /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\
+ /usr/local/include/jemalloc
fi
if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then
/usr/bin/install -c -d /usr/local/lib
@@ -225,23 +226,47 @@ function install_jemalloc() {
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig
- /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc /usr/local/lib/pkgconfig
+ /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\
+ /usr/local/lib/pkgconfig
+ fi
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
fi
fi
if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
- /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html /usr/local/share/doc/jemalloc
+ /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\
+ /usr/local/share/doc/jemalloc
fi
if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then
/usr/bin/install -c -d /usr/local/share/man/man3
- /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3 /usr/local/share/man/man3
+ /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\
+ /usr/local/share/man/man3
fi
- if [ -d /etc/ld.so.conf.d ]; then
- echo "/usr/local/lib" | ${csudo} tee /etc/ld.so.conf.d/jemalloc.conf
- ${csudo} ldconfig
- else
- echo "/etc/ld.so.conf.d not found!"
+ fi
+}
+
+function install_avro() {
+ if [ "$osType" != "Darwin" ]; then
+ if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then
+ /usr/bin/install -c -d /usr/local/$1
+ /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
+ ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
+ ln -sf libavro.so.23 /usr/local/$1/libavro.so
+ /usr/bin/install -c -d /usr/local/$1
+ [ -f ${binary_dir}/build/$1/libavro.a ] &&
+ /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
+
+ if [ -d /etc/ld.so.conf.d ]; then
+ echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf
+ ${csudo} ldconfig
+ else
+ echo "/etc/ld.so.conf.d not found!"
+ fi
fi
fi
}
@@ -292,6 +317,8 @@ function install_lib() {
fi
install_jemalloc
+ install_avro lib
+ install_avro lib64
if [ "$osType" != "Darwin" ]; then
${csudo} ldconfig
@@ -324,39 +351,33 @@ function install_config() {
[ -f ${script_dir}/../cfg/taos.cfg ] &&
${csudo} cp ${script_dir}/../cfg/taos.cfg ${cfg_install_dir}
${csudo} chmod 644 ${cfg_install_dir}/taos.cfg
- ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${install_main_dir}/cfg/taos.cfg
+ ${csudo} cp -f ${script_dir}/../cfg/taos.cfg \
+ ${cfg_install_dir}/taos.cfg.${verNumber}
+ ${csudo} ln -s ${cfg_install_dir}/taos.cfg \
+ ${install_main_dir}/cfg/taos.cfg
else
- if [ "$osType" != "Darwin" ]; then
- ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org
- else
- ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_dir}/cfg/taos.cfg.org\
- || ${csudo} cp -f ${script_dir}/../cfg/taos.cfg ${install_main_2_dir}/cfg/taos.cfg.org
- fi
+ ${csudo} cp -f ${script_dir}/../cfg/taos.cfg \
+ ${cfg_install_dir}/taos.cfg.${verNumber}
fi
}
-function install_blm3_config() {
- if [ ! -f "${cfg_install_dir}/blm.toml" ]; then
+function install_taosadapter_config() {
+ if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${binary_dir}/test/cfg/blm.toml ] &&
- ${csudo} cp ${binary_dir}/test/cfg/blm.toml ${cfg_install_dir}
- [ -f ${cfg_install_dir}/blm.toml ] &&
- ${csudo} chmod 644 ${cfg_install_dir}/blm.toml
- [ -f ${binary_dir}/test/cfg/blm.toml ] &&
- ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml ${install_main_dir}/cfg/blm.toml.org
- [ -f ${cfg_install_dir}/blm.toml ] &&
- ${csudo} ln -s ${cfg_install_dir}/blm.toml ${install_main_dir}/cfg/blm.toml
+ [ -f ${binary_dir}/test/cfg/taosadapter.toml ] &&
+ ${csudo} cp ${binary_dir}/test/cfg/taosadapter.toml ${cfg_install_dir}
+ [ -f ${cfg_install_dir}/taosadapter.toml ] &&
+ ${csudo} chmod 644 ${cfg_install_dir}/taosadapter.toml
+ [ -f ${binary_dir}/test/cfg/taosadapter.toml ] &&
+ ${csudo} cp -f ${binary_dir}/test/cfg/taosadapter.toml \
+ ${cfg_install_dir}/taosadapter.toml.${verNumber}
+ [ -f ${cfg_install_dir}/taosadapter.toml ] && \
+ ${csudo} ln -s ${cfg_install_dir}/taosadapter.toml \
+ ${install_main_dir}/cfg/taosadapter.toml
else
- if [ -f "${binary_dir}/test/cfg/blm.toml" ]; then
- if [ "$osType" != "Darwin" ]; then
- ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml \
- ${install_main_dir}/cfg/blm.toml.org
- else
- ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml ${install_main_dir}/cfg/blm.toml.org \
- || ${csudo} cp -f ${binary_dir}/test/cfg/blm.toml \
- ${install_main_2_dir}/cfg/blm.toml.org
- fi
+ if [ -f "${binary_dir}/test/cfg/taosadapter.toml" ]; then
+ ${csudo} cp -f ${binary_dir}/test/cfg/taosadapter.toml \
+ ${cfg_install_dir}/taosadapter.toml.${verNumber}
fi
fi
}
@@ -381,11 +402,6 @@ function install_data() {
}
function install_connector() {
- if [ -d "${source_dir}/src/connector/grafanaplugin/dist" ]; then
- ${csudo} cp -rf ${source_dir}/src/connector/grafanaplugin/dist ${install_main_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${source_dir}/src/connector/go -mindepth 1 -maxdepth 1 | read; then
${csudo} cp -r ${source_dir}/src/connector/go ${install_main_dir}/connector
else
@@ -481,8 +497,8 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
- ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
- ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
@@ -503,6 +519,16 @@ function install_service_on_systemd() {
${csudo} systemctl enable taosd
}
+function install_taosadapter_service() {
+ if ((${service_mod}==0)); then
+ [ -f ${binary_dir}/test/cfg/taosadapter.service ] &&\
+ ${csudo} cp ${binary_dir}/test/cfg/taosadapter.service\
+ ${service_config_dir}/ || :
+ else
+ kill_taosadapter
+ fi
+}
+
function install_service() {
if ((${service_mod}==0)); then
install_service_on_systemd
@@ -510,7 +536,6 @@ function install_service() {
install_service_on_sysvinit
else
# must manual stop taosd
- kill_blm3
kill_taosd
fi
}
@@ -526,7 +551,7 @@ function update_TDengine() {
elif ((${service_mod}==1)); then
${csudo} service taosd stop || :
else
- kill_blm3
+ kill_taosadapter
kill_taosd
fi
sleep 1
@@ -544,10 +569,11 @@ function update_TDengine() {
if [ "$osType" != "Darwin" ]; then
install_service
+ install_taosadapter_service
fi
install_config
- install_blm3_config
+ install_taosadapter_config
if [ "$osType" != "Darwin" ]; then
echo
@@ -555,7 +581,7 @@ function update_TDengine() {
echo
echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
- echo -e "${GREEN_DARK}To configure blm3 (if has) ${NC}: edit /etc/taos/blm.toml"
+ echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit /etc/taos/taosadapter.toml"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}"
elif ((${service_mod}==1)); then
@@ -598,10 +624,11 @@ function install_TDengine() {
if [ "$osType" != "Darwin" ]; then
install_service
+ install_taosadapter_service
fi
install_config
- install_blm3_config
+ install_taosadapter_config
if [ "$osType" != "Darwin" ]; then
# Ask if to start the service
@@ -609,7 +636,7 @@ function install_TDengine() {
echo -e "\033[44;32;1mTDengine is installed successfully!${NC}"
echo
echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg"
- echo -e "${GREEN_DARK}To configure blm (if has) ${NC}: edit /etc/taos/blm.toml"
+ echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit /etc/taos/taosadapter.toml"
if ((${service_mod}==0)); then
echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}"
elif ((${service_mod}==1)); then
diff --git a/packaging/tools/makeclient.sh b/packaging/tools/makeclient.sh
index d26f617e421406364ce4d34c4baf5c55b904a2b5..39a35e384fffdd4f319e72fbeb819fe08f7871b8 100755
--- a/packaging/tools/makeclient.sh
+++ b/packaging/tools/makeclient.sh
@@ -150,11 +150,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makeclient_power.sh b/packaging/tools/makeclient_power.sh
index 89591cac234b190f55d144ccf98cb2d5c70a7936..19e24b3dafb7f1f95832e637e181449e4c381faf 100755
--- a/packaging/tools/makeclient_power.sh
+++ b/packaging/tools/makeclient_power.sh
@@ -210,11 +210,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makeclient_pro.sh b/packaging/tools/makeclient_pro.sh
index 599c91fbf082955887c677b750aa12f946c0890b..4a0b033d30e6478f37a62f9cc896aee0903d39c9 100755
--- a/packaging/tools/makeclient_pro.sh
+++ b/packaging/tools/makeclient_pro.sh
@@ -172,11 +172,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makeclient_tq.sh b/packaging/tools/makeclient_tq.sh
index 03d9b13059daadfdc7207c78b6f89cae321f25ac..1cc7003661a7491b1df625916dd289de32434ee9 100755
--- a/packaging/tools/makeclient_tq.sh
+++ b/packaging/tools/makeclient_tq.sh
@@ -177,11 +177,6 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
fi
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bunlded dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index f0c25208529768fb387262a668381a57e34f51ac..05b49ff6a9599c6050d2ccad778f63d285981420 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -35,12 +35,12 @@ fi
if [ "$pagMode" == "lite" ]; then
strip ${build_dir}/bin/taosd
strip ${build_dir}/bin/taos
- # lite version doesn't include blm3, which will lead to no restful interface
+ # lite version doesn't include taosadapter, which will lead to no restful interface
bin_files="${build_dir}/bin/taosd ${build_dir}/bin/taos ${script_dir}/remove.sh ${script_dir}/startPre.sh"
else
bin_files="${build_dir}/bin/taosd \
${build_dir}/bin/taos \
- ${build_dir}/bin/blm3 \
+ ${build_dir}/bin/taosadapter \
${build_dir}/bin/taosdump \
${build_dir}/bin/taosdemo \
${build_dir}/bin/tarbitrator\
@@ -78,7 +78,7 @@ mkdir -p ${install_dir}
mkdir -p ${install_dir}/inc && cp ${header_files} ${install_dir}/inc
mkdir -p ${install_dir}/cfg && cp ${cfg_dir}/taos.cfg ${install_dir}/cfg/taos.cfg
-[ -f ${cfg_dir}/blm.toml ] && cp ${cfg_dir}/blm.toml ${install_dir}/cfg/blm.toml
+[ -f ${cfg_dir}/taosadapter.toml ] && cp ${cfg_dir}/taosadapter.toml ${install_dir}/cfg/taosadapter.toml
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/taosd.deb
@@ -195,11 +195,6 @@ connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if you want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makepkg_power.sh b/packaging/tools/makepkg_power.sh
index dbb7e6887fa1b0f96ea68f1c880ee77ced0858bd..65200ddd047358f92f8e3a612c08eedb60053311 100755
--- a/packaging/tools/makepkg_power.sh
+++ b/packaging/tools/makepkg_power.sh
@@ -81,7 +81,7 @@ else
# bin_files="${build_dir}/bin/powerd ${build_dir}/bin/power ${build_dir}/bin/powerdemo ${build_dir}/bin/tarbitrator ${script_dir}/remove_power.sh ${script_dir}/set_core.sh"
cp ${build_dir}/bin/taos ${install_dir}/bin/power
cp ${build_dir}/bin/taosd ${install_dir}/bin/powerd
- cp ${build_dir}/bin/blm3 ${install_dir}/bin/blm3 ||:
+ cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||:
cp ${script_dir}/remove_power.sh ${install_dir}/bin
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/powerdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/powerdump
@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/makepkg_pro.sh b/packaging/tools/makepkg_pro.sh
index 1668838be0522bc02ab027b6ee4ac6ff250fefa2..457cb0de6f02f7000dc7437cde61bfec28c7205c 100755
--- a/packaging/tools/makepkg_pro.sh
+++ b/packaging/tools/makepkg_pro.sh
@@ -62,7 +62,7 @@ else
fi
cp ${build_dir}/bin/taos ${install_dir}/bin/prodbc
cp ${build_dir}/bin/taosd ${install_dir}/bin/prodbs
-cp ${build_dir}/bin/blm3 ${install_dir}/bin/blm3 ||:
+cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||:
cp ${script_dir}/remove_pro.sh ${install_dir}/bin
chmod a+x ${install_dir}/bin/* || :
@@ -154,11 +154,6 @@ mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo
#if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
# cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
-# if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
-# cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
-# else
-# echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
-# fi
# if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
# cp -r ${connector_dir}/go ${install_dir}/connector
# else
diff --git a/packaging/tools/makepkg_tq.sh b/packaging/tools/makepkg_tq.sh
index 416a3f60a4a57d6afa34d1d8f931a7efd68d6958..07032379d7e4bab2636f3685b6edb620780a124a 100755
--- a/packaging/tools/makepkg_tq.sh
+++ b/packaging/tools/makepkg_tq.sh
@@ -82,7 +82,7 @@ else
cp ${build_dir}/bin/taos ${install_dir}/bin/tq
cp ${build_dir}/bin/taosd ${install_dir}/bin/tqd
cp ${script_dir}/remove_tq.sh ${install_dir}/bin
- cp ${build_dir}/bin/blm3 ${install_dir}/bin/blm3 ||:
+ cp ${build_dir}/bin/taosadapter ${install_dir}/bin/taosadapter ||:
cp ${build_dir}/bin/taosdemo ${install_dir}/bin/tqdemo
cp ${build_dir}/bin/taosdump ${install_dir}/bin/tqdump
cp ${build_dir}/bin/tarbitrator ${install_dir}/bin
@@ -168,11 +168,6 @@ mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector ||:
- if [ -d "${connector_dir}/grafanaplugin/dist" ]; then
- cp -r ${connector_dir}/grafanaplugin/dist ${install_dir}/connector/grafanaplugin
- else
- echo "WARNING: grafanaplugin bundled dir not found, please check if want to use it!"
- fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index a4bd8a8f28672273a913a6390855c85bcc2d5136..c3db7e417adb11b92d55464b69c715e3aee2d6bb 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -2,7 +2,7 @@
#
# This file is used to install tdengine rpm package on centos systems. The operating system
# is required to use systemd to manage services at boot
-#set -x
+# set -x
iplist=""
serverFqdn=""
@@ -64,9 +64,9 @@ else
service_mod=2
fi
-function kill_blm3() {
-# ${csudo} pkill -f blm3 || :
- pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}')
+function kill_taosadapter() {
+# ${csudo} pkill -f taosadapter || :
+ pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
@@ -86,6 +86,24 @@ function install_include() {
${csudo} ln -s ${inc_dir}/taoserror.h ${inc_link_dir}/taoserror.h
}
+function install_avro_lib() {
+ ${csudo} rm -f ${lib_link_dir}/libavro* || :
+ ${csudo} rm -f ${lib64_link_dir}/libavro* || :
+
+ if [[ -f ${lib_dir}/libavro.so.23.0.0 ]]; then
+ ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23.0.0
+ ${csudo} ln -s ${lib_link_dir}/libavro.so.23.0.0 ${lib_link_dir}/libavro.so.23
+ ${csudo} ln -s ${lib_link_dir}/libavro.so.23 ${lib_link_dir}/libavro.so
+
+ if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libavro.so ]]; then
+ ${csudo} ln -s ${lib_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23.0.0 || :
+ ${csudo} ln -s ${lib64_link_dir}/libavro.so.23.0.0 ${lib64_link_dir}/libavro.so.23 || :
+ ${csudo} ln -s ${lib64_link_dir}/libavro.so.23 ${lib64_link_dir}/libavro.so || :
+ fi
+ fi
+
+ ${csudo} ldconfig
+}
function install_lib() {
${csudo} rm -f ${lib_link_dir}/libtaos* || :
${csudo} rm -f ${lib64_link_dir}/libtaos* || :
@@ -97,13 +115,15 @@ function install_lib() {
${csudo} ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
+
+ ${csudo} ldconfig
}
function install_bin() {
# Remove links
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/blm3 || :
+ ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
@@ -114,7 +134,7 @@ function install_bin() {
#Make link
[ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || :
[ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || :
- [ -x ${bin_dir}/blm3 ] && ${csudo} ln -s ${bin_dir}/blm3 ${bin_link_dir}/blm3 || :
+ [ -x ${bin_dir}/taosadapter ] && ${csudo} ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || :
[ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || :
[ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || :
[ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || :
@@ -127,7 +147,7 @@ function add_newHostname_to_hosts() {
iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}')
arr=($iphost)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$localIp" ]]; then
return
@@ -182,7 +202,7 @@ function is_correct_ipaddr() {
IFS=" "
arr=($iplist)
IFS="$OLD_IFS"
- for s in ${arr[@]}
+ for s in "${arr[@]}"
do
if [[ "$s" == "$newIp" ]]; then
return 0
@@ -271,23 +291,20 @@ function local_fqdn_check() {
fi
}
-function install_blm3_config() {
- if [ ! -f "${cfg_install_dir}/blm.toml" ]; then
+function install_taosadapter_config() {
+ if [ ! -f "${cfg_install_dir}/taosadapter.toml" ]; then
[ ! -d %{cfg_install_dir} ] &&
${csudo} ${csudo} mkdir -p ${cfg_install_dir}
- [ -f ${cfg_dir}/blm.toml ] && ${csudo} cp ${cfg_dir}/blm.toml ${cfg_install_dir}
- [ -f ${cfg_install_dir}/blm.toml ] &&
- ${csudo} chmod 644 ${cfg_install_dir}/blm.toml
+ [ -f ${cfg_dir}/taosadapter.toml ] && ${csudo} cp ${cfg_dir}/taosadapter.toml ${cfg_install_dir}
+ [ -f ${cfg_install_dir}/taosadapter.toml ] &&
+ ${csudo} chmod 644 ${cfg_install_dir}/taosadapter.toml
fi
- # restore the backup standard input, and turn off 6
- exec 0<&6 6<&-
+ [ -f ${cfg_dir}/taosadapter.toml ] &&
+ ${csudo} mv ${cfg_dir}/taosadapter.toml ${cfg_dir}/taosadapter.toml.new
- [ -f ${cfg_dir}/blm.toml ] &&
- ${csudo} mv ${cfg_dir}/blm.toml ${cfg_dir}/blm.toml.org
-
- [ -f ${cfg_install_dir}/blm.toml ] &&
- ${csudo} ln -s ${cfg_install_dir}/blm.toml ${cfg_dir}
+ [ -f ${cfg_install_dir}/taosadapter.toml ] &&
+ ${csudo} ln -s ${cfg_install_dir}/taosadapter.toml ${cfg_dir}
}
function install_config() {
@@ -305,7 +322,7 @@ function install_config() {
# restore the backup standard input, and turn off 6
exec 0<&6 6<&-
- ${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.org
+ ${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.new
${csudo} ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir}
#FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
#FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)"
@@ -427,8 +444,8 @@ function install_service_on_systemd() {
${csudo} bash -c "echo '[Unit]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Description=TDengine server service' >> ${taosd_service_config}"
- ${csudo} bash -c "echo 'After=network-online.target' >> ${taosd_service_config}"
- ${csudo} bash -c "echo 'Wants=network-online.target' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'After=network-online.target taosadapter.service' >> ${taosd_service_config}"
+ ${csudo} bash -c "echo 'Wants=network-online.target taosadapter.service' >> ${taosd_service_config}"
${csudo} bash -c "echo >> ${taosd_service_config}"
${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}"
${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}"
@@ -449,6 +466,10 @@ function install_service_on_systemd() {
${csudo} systemctl enable taosd
}
+function install_taosadapter_service() {
+ [ -f ${cfg_dir}/taosadapter.service ] && ${csudo} cp ${cfg_dir}/taosadapter.service ${service_config_dir}
+}
+
function install_service() {
if ((${service_mod}==0)); then
install_service_on_systemd
@@ -456,7 +477,7 @@ function install_service() {
install_service_on_sysvinit
else
# manual start taosd
- kill_blm3
+ kill_taosadapter
kill_taosd
fi
}
@@ -477,10 +498,12 @@ function install_TDengine() {
# Install include, lib, binary and service
install_include
install_lib
+ install_avro_lib
install_bin
- install_service
install_config
- install_blm3_config
+ install_taosadapter_config
+ install_taosadapter_service
+ install_service
# Ask if to start the service
#echo
diff --git a/packaging/tools/preun.sh b/packaging/tools/preun.sh
index 16a892d26c1d11cddf5dc15758e784c9ff268822..d2d36364208f23492d2ba6aefa783c85ad6d5572 100755
--- a/packaging/tools/preun.sh
+++ b/packaging/tools/preun.sh
@@ -43,8 +43,8 @@ else
service_mod=2
fi
-function kill_blm3() {
- pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}')
+function kill_taosadapter() {
+ pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
@@ -58,6 +58,12 @@ function kill_taosd() {
}
function clean_service_on_systemd() {
+ taosadapter_service_config="${service_config_dir}/taosadapter.service"
+ if systemctl is-active --quiet taosadapter; then
+ echo "taosadapter is running, stopping it..."
+ ${csudo} systemctl stop taosadapter &> /dev/null || echo &> /dev/null
+ fi
+
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
if systemctl is-active --quiet ${taos_service_name}; then
@@ -67,6 +73,9 @@ function clean_service_on_systemd() {
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
+
+ [ -f ${taosadapter_service_config} ] && ${csudo} rm -f ${taosadapter_service_config}
+
}
function clean_service_on_sysvinit() {
@@ -100,7 +109,7 @@ function clean_service() {
clean_service_on_sysvinit
else
# must manual stop taosd
- kill_blm3
+ kill_taosadapter
kill_taosd
fi
}
@@ -111,11 +120,11 @@ clean_service
# Remove all links
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
-${csudo} rm -f ${bin_link_dir}/blm3 || :
+${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/set_core || :
-${csudo} rm -f ${cfg_link_dir}/* || :
+${csudo} rm -f ${cfg_link_dir}/*.new || :
${csudo} rm -f ${inc_link_dir}/taos.h || :
${csudo} rm -f ${inc_link_dir}/taoserror.h || :
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
@@ -125,7 +134,7 @@ ${csudo} rm -f ${log_link_dir} || :
${csudo} rm -f ${data_link_dir} || :
if ((${service_mod}==2)); then
- kill_blm3
+ kill_taosadapter
kill_taosd
fi
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index f4c3350b7861ce8c027b54641e56fa99f87afbb8..07a8362b2c45676986513020da668ff9235f00fa 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -54,8 +54,8 @@ else
service_mod=2
fi
-function kill_blm3() {
- pid=$(ps -ef | grep "blm3" | grep -v "grep" | awk '{print $2}')
+function kill_taosadapter() {
+ pid=$(ps -ef | grep "taosadapter" | grep -v "grep" | awk '{print $2}')
if [ -n "$pid" ]; then
${csudo} kill -9 $pid || :
fi
@@ -78,7 +78,7 @@ function clean_bin() {
# Remove link
${csudo} rm -f ${bin_link_dir}/taos || :
${csudo} rm -f ${bin_link_dir}/taosd || :
- ${csudo} rm -f ${bin_link_dir}/blm3 || :
+ ${csudo} rm -f ${bin_link_dir}/taosadapter || :
${csudo} rm -f ${bin_link_dir}/taosdemo || :
${csudo} rm -f ${bin_link_dir}/taosdump || :
${csudo} rm -f ${bin_link_dir}/rmtaos || :
@@ -111,12 +111,14 @@ function clean_log() {
function clean_service_on_systemd() {
taosd_service_config="${service_config_dir}/${taos_service_name}.service"
+ taosadapter_service_config="${service_config_dir}/taosadapter.service"
if systemctl is-active --quiet ${taos_service_name}; then
echo "TDengine taosd is running, stopping it..."
${csudo} systemctl stop ${taos_service_name} &> /dev/null || echo &> /dev/null
fi
${csudo} systemctl disable ${taos_service_name} &> /dev/null || echo &> /dev/null
${csudo} rm -f ${taosd_service_config}
+ [ -f ${taosadapter_service_config} ] && ${csudo} rm -f ${taosadapter_service_config}
tarbitratord_service_config="${service_config_dir}/${tarbitrator_service_name}.service"
if systemctl is-active --quiet ${tarbitrator_service_name}; then
@@ -191,7 +193,7 @@ function clean_service() {
clean_service_on_sysvinit
else
# must manual stop taosd
- kill_blm3
+ kill_taosadapter
kill_taosd
kill_tarbitrator
fi
diff --git a/packaging/tools/startPre.sh b/packaging/tools/startPre.sh
index 2f466f94f08555b5c8cf8d5b4abe459f52ece49f..8a0ab11a4d37ffb9ad244faa2946cbbf10ce2026 100755
--- a/packaging/tools/startPre.sh
+++ b/packaging/tools/startPre.sh
@@ -9,8 +9,8 @@ line=`grep StartLimitBurst ${taosd}`
num=${line##*=}
#echo "burst num: ${num}"
-startSeqFile=/usr/local/taos/.startSeq
-recordFile=/usr/local/taos/.startRecord
+startSeqFile=/var/log/taos/.startSeq
+recordFile=/var/log/taos/.startRecord
startSeq=0
@@ -48,4 +48,3 @@ if [ ${coreFlag} = "unlimited" ];then
fi
fi
-/usr/bin/blm3 &
diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml
index 28515f6c63c98f741d84aa11f92b9ca9f7ad3691..64e3af498cedd25dea90055426110522bc4a4086 100644
--- a/snap/snapcraft.yaml
+++ b/snap/snapcraft.yaml
@@ -1,6 +1,6 @@
name: tdengine
-base: core18
-version: '2.3.0.0'
+base: core20
+version: '2.3.1.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@@ -39,14 +39,17 @@ parts:
- taoswrapper.sh
tdengine:
+ plugin: cmake
source: .
source-type: local
- plugin: cmake
build-packages:
- gcc
- g++
- make
- cmake
+ cmake-parameters:
+ - -DCMAKE_BUILD_TYPE=Release
+ - -DBUILD_HTTP=true
override-build: |
snapcraftctl build
if [ ! -d $SNAPCRAFT_STAGE/usr ]; then
diff --git a/src/client/inc/tscParseLine.h b/src/client/inc/tscParseLine.h
index 939ccfb613968620ab1447a7a833277743accb43..fef55011b0faec1d15876764b3fd9808ec2b4e39 100644
--- a/src/client/inc/tscParseLine.h
+++ b/src/client/inc/tscParseLine.h
@@ -64,9 +64,9 @@ typedef struct {
SMLTimeStampType tsType;
SHashObj* smlDataToSchema;
- int64_t affectedRows;
+ int32_t affectedRows;
} SSmlLinesInfo;
-
+char* addEscapeCharToString(char *str, int32_t len);
int tscSmlInsert(TAOS* taos, TAOS_SML_DATA_POINT* points, int numPoint, SSmlLinesInfo* info);
bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info);
bool isValidInteger(char *str);
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index 11ae6ae2704050850e7d79f8ee8c36ce207158e6..04ee1b7953946565007e8a30f43fa4a600e63b19 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -155,6 +155,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
+bool tscGroupbyTag(SQueryInfo* pQueryInfo);
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
@@ -251,6 +252,7 @@ void tscColumnListCopyAll(SArray* dst, const SArray* src);
void convertQueryResult(SSqlRes* pRes, SQueryInfo* pQueryInfo, uint64_t objId, bool convertNchar);
void tscDequoteAndTrimToken(SStrToken* pToken);
+void tscRmEscapeAndTrimToken(SStrToken* pToken);
int32_t tscValidateName(SStrToken* pToken, bool escapeEnabled, bool *dbIncluded);
void tscIncStreamExecutionCount(void* pStream);
diff --git a/src/client/jni/com_alibaba_datax_plugin_writer_JniConnection.h b/src/client/jni/com_alibaba_datax_plugin_writer_JniConnection.h
new file mode 100644
index 0000000000000000000000000000000000000000..61f0e6eb9ce4c15c8c68d8375ad853a7505588ce
--- /dev/null
+++ b/src/client/jni/com_alibaba_datax_plugin_writer_JniConnection.h
@@ -0,0 +1,81 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_alibaba_datax_plugin_writer_JniConnection */
+
+#ifndef _Included_com_alibaba_datax_plugin_writer_JniConnection
+#define _Included_com_alibaba_datax_plugin_writer_JniConnection
+#ifdef __cplusplus
+extern "C" {
+#endif
+#undef com_alibaba_datax_plugin_writer_JniConnection_JNI_NULL_POINTER
+#define com_alibaba_datax_plugin_writer_JniConnection_JNI_NULL_POINTER 0LL
+#undef com_alibaba_datax_plugin_writer_JniConnection_JNI_SUCCESSFUL
+#define com_alibaba_datax_plugin_writer_JniConnection_JNI_SUCCESSFUL 0L
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: initImp
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_initImp
+ (JNIEnv *, jclass, jstring);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: setOptions
+ * Signature: (ILjava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_setOptions
+ (JNIEnv *, jclass, jint, jstring);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: connectImp
+ * Signature: (Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;Ljava/lang/String;)J
+ */
+JNIEXPORT jlong JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_connectImp
+ (JNIEnv *, jobject, jstring, jint, jstring, jstring, jstring);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: getErrCodeImp
+ * Signature: (JJ)I
+ */
+JNIEXPORT jint JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_getErrCodeImp
+ (JNIEnv *, jobject, jlong, jlong);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: getErrMsgImp
+ * Signature: (J)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_getErrMsgImp
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: freeResultSetImp
+ * Signature: (JJ)V
+ */
+JNIEXPORT void JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_freeResultSetImp
+ (JNIEnv *, jobject, jlong, jlong);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: closeConnectionImp
+ * Signature: (J)I
+ */
+JNIEXPORT jint JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_closeConnectionImp
+ (JNIEnv *, jobject, jlong);
+
+/*
+ * Class: com_alibaba_datax_plugin_writer_JniConnection
+ * Method: insertOpentsdbJson
+ * Signature: (Ljava/lang/String;J)J
+ */
+JNIEXPORT jlong JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_insertOpentsdbJson
+ (JNIEnv *, jobject, jstring, jlong);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
index 61ae5082f31cd9129a3cec1eaa1e0552ada7993b..1038af5abb1d00b14b1c54d2f96522647b71178b 100644
--- a/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
+++ b/src/client/jni/com_taosdata_jdbc_TSDBJNIConnector.h
@@ -239,6 +239,14 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsImp
(JNIEnv *, jobject, jlong, jstring, jint, jbyteArray, jbyteArray, jbyteArray, jbyteArray, jlong);
+/*
+ * Class: com_taosdata_jdbc_TSDBJNIConnector
+ * Method: insertLinesImp
+ * Signature: ([Ljava/lang/String;JII)I
+ */
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp
+ (JNIEnv *, jobject, jobjectArray, jlong, jint, jint);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/client/jni/jniCommon.h b/src/client/jni/jniCommon.h
new file mode 100644
index 0000000000000000000000000000000000000000..78724eed319b8b414b12fc46e3d31899370ba39d
--- /dev/null
+++ b/src/client/jni/jniCommon.h
@@ -0,0 +1,87 @@
+#include <jni.h>
+
+#ifndef TDENGINE_JNICOMMON_H
+#define TDENGINE_JNICOMMON_H
+
+#define jniFatal(...) \
+ { \
+ if (jniDebugFlag & DEBUG_FATAL) { \
+ taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniError(...) \
+ { \
+ if (jniDebugFlag & DEBUG_ERROR) { \
+ taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniWarn(...) \
+ { \
+ if (jniDebugFlag & DEBUG_WARN) { \
+ taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniInfo(...) \
+ { \
+ if (jniDebugFlag & DEBUG_INFO) { \
+ taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniDebug(...) \
+ { \
+ if (jniDebugFlag & DEBUG_DEBUG) { \
+ taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+#define jniTrace(...) \
+ { \
+ if (jniDebugFlag & DEBUG_TRACE) { \
+ taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
+ } \
+ }
+
+extern jclass g_arrayListClass;
+extern jmethodID g_arrayListConstructFp;
+extern jmethodID g_arrayListAddFp;
+
+extern jclass g_metadataClass;
+extern jmethodID g_metadataConstructFp;
+extern jfieldID g_metadataColtypeField;
+extern jfieldID g_metadataColnameField;
+extern jfieldID g_metadataColsizeField;
+extern jfieldID g_metadataColindexField;
+
+extern jclass g_rowdataClass;
+extern jmethodID g_rowdataConstructor;
+extern jmethodID g_rowdataClearFp;
+extern jmethodID g_rowdataSetBooleanFp;
+extern jmethodID g_rowdataSetByteFp;
+extern jmethodID g_rowdataSetShortFp;
+extern jmethodID g_rowdataSetIntFp;
+extern jmethodID g_rowdataSetLongFp;
+extern jmethodID g_rowdataSetFloatFp;
+extern jmethodID g_rowdataSetDoubleFp;
+extern jmethodID g_rowdataSetStringFp;
+extern jmethodID g_rowdataSetTimestampFp;
+extern jmethodID g_rowdataSetByteArrayFp;
+
+extern jmethodID g_blockdataSetByteArrayFp;
+extern jmethodID g_blockdataSetNumOfRowsFp;
+extern jmethodID g_blockdataSetNumOfColsFp;
+
+#define JNI_SUCCESS 0
+#define JNI_TDENGINE_ERROR -1
+#define JNI_CONNECTION_NULL -2
+#define JNI_RESULT_SET_NULL -3
+#define JNI_NUM_OF_FIELDS_0 -4
+#define JNI_SQL_NULL -5
+#define JNI_FETCH_END -6
+#define JNI_OUT_OF_MEMORY -7
+
+extern JavaVM *g_vm;
+
+void jniGetGlobalMethod(JNIEnv *env);
+
+int32_t check_for_params(jobject jobj, jlong conn, jlong res);
+
+#endif // TDENGINE_JNICOMMON_H
diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c
index 50fe51e7dac0fc6f5ca4cc0458670f3962942f0e..5127aaf665b8059a12ef0985140c2a01ea328bfa 100644
--- a/src/client/src/TSDBJNIConnector.c
+++ b/src/client/src/TSDBJNIConnector.c
@@ -17,46 +17,9 @@
#include "taos.h"
#include "tlog.h"
#include "tscUtil.h"
-#include "tscParseLine.h"
#include "com_taosdata_jdbc_TSDBJNIConnector.h"
-
-#define jniFatal(...) \
- { \
- if (jniDebugFlag & DEBUG_FATAL) { \
- taosPrintLog("JNI FATAL ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
- } \
- }
-#define jniError(...) \
- { \
- if (jniDebugFlag & DEBUG_ERROR) { \
- taosPrintLog("JNI ERROR ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
- } \
- }
-#define jniWarn(...) \
- { \
- if (jniDebugFlag & DEBUG_WARN) { \
- taosPrintLog("JNI WARN ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
- } \
- }
-#define jniInfo(...) \
- { \
- if (jniDebugFlag & DEBUG_INFO) { \
- taosPrintLog("JNI ", tscEmbedded ? 255 : jniDebugFlag, __VA_ARGS__); \
- } \
- }
-#define jniDebug(...) \
- { \
- if (jniDebugFlag & DEBUG_DEBUG) { \
- taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
- } \
- }
-#define jniTrace(...) \
- { \
- if (jniDebugFlag & DEBUG_TRACE) { \
- taosPrintLog("JNI ", jniDebugFlag, __VA_ARGS__); \
- } \
- }
+#include "jniCommon.h"
int __init = 0;
@@ -91,16 +54,7 @@ jmethodID g_blockdataSetByteArrayFp;
jmethodID g_blockdataSetNumOfRowsFp;
jmethodID g_blockdataSetNumOfColsFp;
-#define JNI_SUCCESS 0
-#define JNI_TDENGINE_ERROR -1
-#define JNI_CONNECTION_NULL -2
-#define JNI_RESULT_SET_NULL -3
-#define JNI_NUM_OF_FIELDS_0 -4
-#define JNI_SQL_NULL -5
-#define JNI_FETCH_END -6
-#define JNI_OUT_OF_MEMORY -7
-
-static void jniGetGlobalMethod(JNIEnv *env) {
+void jniGetGlobalMethod(JNIEnv *env) {
// make sure init function executed once
switch (atomic_val_compare_exchange_32(&__init, 0, 1)) {
case 0:
@@ -159,7 +113,7 @@ static void jniGetGlobalMethod(JNIEnv *env) {
jniDebug("native method register finished");
}
-static int32_t check_for_params(jobject jobj, jlong conn, jlong res) {
+int32_t check_for_params(jobject jobj, jlong conn, jlong res) {
if ((TAOS *)conn == NULL) {
jniError("jobj:%p, connection is closed", jobj);
return JNI_CONNECTION_NULL;
@@ -219,26 +173,8 @@ JNIEXPORT jobject createTSDBException(JNIEnv *env, int code, char *msg) {
return exception_obj;
}
-/*
- * Class: com_taosdata_jdbc_TSDBJNIConnector
- * Method: setConfigImp
- * Signature: (Ljava/lang/String;)Lcom/taosdata/jdbc/TSDBException;
- */
JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setConfigImp(JNIEnv *env, jclass jobj,
jstring config) {
- /*
- if (config == NULL) {
- jniDebug("config value is null");
- return -1;
- }
-
- const char *cfg = (*env)->GetStringUTFChars(env, config, NULL);
- if (!cfg) {
- return -1;
- }
- return 0;
- */
-
if (config == NULL) {
char *msg = "config value is null";
jniDebug("config value is null");
@@ -254,7 +190,7 @@ JNIEXPORT jobject JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setConfigImp(J
setConfRet result = taos_set_config(cfg);
int code = result.retCode;
- char * msg = result.retMsg;
+ char *msg = result.retMsg;
return createTSDBException(env, code, msg);
}
@@ -424,7 +360,7 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getErrMsgImp(J
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp(JNIEnv *env, jobject jobj, jlong con,
jlong tres) {
- TAOS * tscon = (TAOS *)con;
+ TAOS *tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, tres);
if (code != JNI_SUCCESS) {
return code;
@@ -467,7 +403,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_freeResultSetImp(
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsImp(JNIEnv *env, jobject jobj, jlong con,
jlong res) {
- TAOS * tscon = (TAOS *)con;
+ TAOS *tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
@@ -483,13 +419,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getSchemaMetaDataImp(JNIEnv *env, jobject jobj,
jlong con, jlong res,
jobject arrayListObj) {
- TAOS * tscon = (TAOS *)con;
+ TAOS *tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
- TAOS_RES * tres = (TAOS_RES *)res;
+ TAOS_RES *tres = (TAOS_RES *)res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t num_fields = taos_num_fields(tres);
@@ -626,13 +562,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchBlockImp(JNIEnv *env, jobject jobj, jlong con,
jlong res, jobject rowobj) {
- TAOS * tscon = (TAOS *)con;
+ TAOS *tscon = (TAOS *)con;
int32_t code = check_for_params(jobj, con, res);
if (code != JNI_SUCCESS) {
return code;
}
- TAOS_RES * tres = (TAOS_RES *)res;
+ TAOS_RES *tres = (TAOS_RES *)res;
TAOS_FIELD *fields = taos_fetch_fields(tres);
int32_t numOfFields = taos_num_fields(tres);
@@ -1021,7 +957,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
}
const char *name = (*env)->GetStringUTFChars(env, tableName, NULL);
- char * curTags = tagsData;
+ char *curTags = tagsData;
TAOS_BIND *tagsBind = calloc(numOfTags, sizeof(TAOS_BIND));
for (int32_t i = 0; i < numOfTags; ++i) {
@@ -1052,7 +988,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
return JNI_SUCCESS;
}
-JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(JNIEnv *env, jobject jobj,
jobjectArray lines, jlong conn,
jint protocol, jint precision) {
TAOS *taos = (TAOS *)conn;
@@ -1083,9 +1019,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_insertLinesImp(J
tfree(c_lines);
if (code != TSDB_CODE_SUCCESS) {
jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, taos, tstrerror(code), taos_errstr(result));
-
+ taos_free_result((void *)result);
return JNI_TDENGINE_ERROR;
}
+ taos_free_result((void *)result);
- return (jlong)result;
+ return JNI_SUCCESS;
}
diff --git a/src/client/src/dataxJniConnection.c b/src/client/src/dataxJniConnection.c
new file mode 100644
index 0000000000000000000000000000000000000000..3cb6551268f4ac87a13f3983920e6a0130592427
--- /dev/null
+++ b/src/client/src/dataxJniConnection.c
@@ -0,0 +1,203 @@
+#include "os.h"
+#include "taos.h"
+#include "tlog.h"
+#include "tscUtil.h"
+
+#include "com_alibaba_datax_plugin_writer_JniConnection.h"
+#include "jniCommon.h"
+
+JNIEXPORT void JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_initImp(JNIEnv *env, jobject jobj,
+ jstring jconfigDir) {
+ if (jconfigDir != NULL) {
+ const char *confDir = (*env)->GetStringUTFChars(env, jconfigDir, NULL);
+ if (confDir && strlen(confDir) != 0) {
+ tstrncpy(configDir, confDir, TSDB_FILENAME_LEN);
+ }
+ (*env)->ReleaseStringUTFChars(env, jconfigDir, confDir);
+ }
+
+ jniDebug("jni initialized successfully, config directory: %s", configDir);
+}
+
+JNIEXPORT jint JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_setOptions(JNIEnv *env, jobject jobj,
+ jint optionIndex,
+ jstring optionValue) {
+ if (optionValue == NULL) {
+ jniDebug("option index:%d value is null", (int32_t)optionIndex);
+ return 0;
+ }
+
+ int res = 0;
+
+ if (optionIndex == TSDB_OPTION_LOCALE) {
+ const char *locale = (*env)->GetStringUTFChars(env, optionValue, NULL);
+ if (locale && strlen(locale) != 0) {
+ res = taos_options(TSDB_OPTION_LOCALE, locale);
+ jniDebug("set locale to %s, result:%d", locale, res);
+ } else {
+ jniDebug("input locale is empty");
+ }
+ (*env)->ReleaseStringUTFChars(env, optionValue, locale);
+ } else if (optionIndex == TSDB_OPTION_CHARSET) {
+ const char *charset = (*env)->GetStringUTFChars(env, optionValue, NULL);
+ if (charset && strlen(charset) != 0) {
+ res = taos_options(TSDB_OPTION_CHARSET, charset);
+ jniDebug("set character encoding to %s, result:%d", charset, res);
+ } else {
+ jniDebug("input character encoding is empty");
+ }
+ (*env)->ReleaseStringUTFChars(env, optionValue, charset);
+ } else if (optionIndex == TSDB_OPTION_TIMEZONE) {
+ const char *tz1 = (*env)->GetStringUTFChars(env, optionValue, NULL);
+ if (tz1 && strlen(tz1) != 0) {
+ res = taos_options(TSDB_OPTION_TIMEZONE, tz1);
+ jniDebug("set timezone to %s, result:%d", tz1, res);
+ } else {
+ jniDebug("input timezone is empty");
+ }
+ (*env)->ReleaseStringUTFChars(env, optionValue, tz1);
+ } else {
+ jniError("option index:%d is not found", (int32_t)optionIndex);
+ }
+
+ return res;
+}
+
+JNIEXPORT jlong JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_connectImp(JNIEnv *env, jobject jobj,
+ jstring jhost, jint jport,
+ jstring jdbName, jstring juser,
+ jstring jpass) {
+ jlong ret = 0;
+ const char *host = NULL;
+ const char *user = NULL;
+ const char *pass = NULL;
+ const char *dbname = NULL;
+
+ if (jhost != NULL) {
+ host = (*env)->GetStringUTFChars(env, jhost, NULL);
+ }
+
+ if (jdbName != NULL) {
+ dbname = (*env)->GetStringUTFChars(env, jdbName, NULL);
+ }
+
+ if (juser != NULL) {
+ user = (*env)->GetStringUTFChars(env, juser, NULL);
+ }
+
+ if (jpass != NULL) {
+ pass = (*env)->GetStringUTFChars(env, jpass, NULL);
+ }
+
+ if (user == NULL) {
+ jniDebug("jobj:%p, user not specified, use default user %s", jobj, TSDB_DEFAULT_USER);
+ }
+
+ if (pass == NULL) {
+ jniDebug("jobj:%p, pass not specified, use default password", jobj);
+ }
+
+ ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
+ if (ret == 0) {
+ jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
+ (char *)host, (char *)user, (char *)dbname, (int32_t)jport);
+ } else {
+ jniDebug("jobj:%p, conn:%p, connect to database succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
+ (char *)host, (char *)user, (char *)dbname, (int32_t)jport);
+ }
+
+ if (host != NULL) {
+ (*env)->ReleaseStringUTFChars(env, jhost, host);
+ }
+
+ if (dbname != NULL) {
+ (*env)->ReleaseStringUTFChars(env, jdbName, dbname);
+ }
+
+ if (user != NULL) {
+ (*env)->ReleaseStringUTFChars(env, juser, user);
+ }
+
+ if (pass != NULL) {
+ (*env)->ReleaseStringUTFChars(env, jpass, pass);
+ }
+
+ return ret;
+}
+
+JNIEXPORT jint JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_getErrCodeImp(JNIEnv *env, jobject jobj,
+ jlong con, jlong tres) {
+ int32_t code = check_for_params(jobj, con, tres);
+ if (code != JNI_SUCCESS) {
+ return code;
+ }
+
+ return (jint)taos_errno((TAOS_RES *)tres);
+}
+
+JNIEXPORT jstring JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_getErrMsgImp(JNIEnv *env, jobject jobj,
+ jlong tres) {
+ TAOS_RES *pSql = (TAOS_RES *)tres;
+ return (*env)->NewStringUTF(env, (const char *)taos_errstr(pSql));
+}
+
+JNIEXPORT void JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_freeResultSetImp(JNIEnv *env, jobject jobj,
+ jlong con, jlong res) {
+ if ((TAOS *)con == NULL) {
+ jniError("jobj:%p, connection is closed", jobj);
+ }
+ if ((TAOS_RES *)res == NULL) {
+ jniError("jobj:%p, conn:%p, res is null", jobj, (TAOS *)con);
+ }
+ taos_free_result((TAOS_RES *)res);
+ jniDebug("jobj:%p, conn:%p, free resultset:%p", jobj, (TAOS *)con, (void *)res);
+}
+
+JNIEXPORT jint JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_closeConnectionImp(JNIEnv *env, jobject jobj,
+ jlong con) {
+ TAOS *tscon = (TAOS *)con;
+ if (tscon == NULL) {
+ jniError("jobj:%p, connection is already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ } else {
+ jniDebug("jobj:%p, conn:%p, close connection success", jobj, tscon);
+ taos_close(tscon);
+ return JNI_SUCCESS;
+ }
+}
+
+JNIEXPORT jlong JNICALL Java_com_alibaba_datax_plugin_writer_JniConnection_insertOpentsdbJson(JNIEnv *env, jobject jobj,
+ jstring json, jlong con) {
+ // check connection
+ TAOS *conn = (TAOS *)con;
+ if (conn == NULL) {
+ jniError("jobj:%p, connection already closed", jobj);
+ return JNI_CONNECTION_NULL;
+ }
+ // java.lang.String -> char *
+ char *payload = NULL;
+ if (json != NULL) {
+ payload = (char *)(*env)->GetStringUTFChars(env, json, NULL);
+ }
+ // check payload
+ if (payload == NULL) {
+ jniDebug("jobj:%p, invalid argument: opentsdb insert json is NULL", jobj);
+ return JNI_SQL_NULL;
+ }
+ // schemaless insert
+ char *payload_arr[1];
+ payload_arr[0] = payload;
+ TAOS_RES *result;
+ result = taos_schemaless_insert(conn, payload_arr, 0, TSDB_SML_JSON_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
+
+ int code = taos_errno(result);
+ if (code != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, conn:%p, code:%s, msg:%s", jobj, conn, tstrerror(code), taos_errstr(result));
+ } else {
+ int32_t affectRows = taos_affected_rows(result);
+ jniDebug("jobj:%p, conn:%p, code:%s, affect rows:%d", jobj, conn, tstrerror(code), affectRows);
+ }
+
+ (*env)->ReleaseStringUTFChars(env, json, payload);
+ return (jlong)result;
+}
diff --git a/src/client/src/tscGlobalmerge.c b/src/client/src/tscGlobalmerge.c
index 14e426ee69f1b11fe09ef23d66190c75a2628e10..0b660c592c84eb4605a1fb76afd3b180fc5daa07 100644
--- a/src/client/src/tscGlobalmerge.c
+++ b/src/client/src/tscGlobalmerge.c
@@ -364,7 +364,8 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SQueryInfo*
SExprInfo* pExprInfo = tscExprGet(pQueryInfo, j);
int32_t functionId = pExprInfo->base.functionId;
- if (pColIndex->colId == pExprInfo->base.colInfo.colId && (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG)) {
+ if (pColIndex->colId == pExprInfo->base.colInfo.colId &&
+ (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ)) {
orderColIndexList[i] = j;
break;
}
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index da51961d0ce8cd1a73cbef3272bc4d4471858cdc..c3c65018a50aea8e7f36d89c15c6b7faa12f2047 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -358,9 +358,13 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) {
int num_fields = taos_num_fields(pSql);
TAOS_FIELD *fields = taos_fetch_fields(pSql);
- char buf[TSDB_COL_NAME_LEN + 16];
for (int i = 0; i < num_fields; i++) {
- memset(buf, 0, sizeof(buf));
+ char *buf = calloc(1, lengths[i] + 1);
+ if (buf == NULL) {
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+
+ memset(buf, 0, lengths[i] + 1);
int32_t ret = tscGetNthFieldResult(row, fields, lengths, i, buf);
if (i == 0) {
@@ -373,10 +377,13 @@ static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) {
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s,", buf);
}
+
+ free(buf);
+
if (i == num_fields - 1) {
sprintf(result + strlen(result) - 1, "%s", ")");
}
- }
+ }
if (0 == strlen(result)) {
return TSDB_CODE_TSC_INVALID_TABLE_NAME;
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index da03ed40f02c667c474f3c10a648cb1808667835..d7ceee630217acc72f8cebc464b3e38aaf440a4e 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -762,35 +762,32 @@ void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
if (!dataBuf->ordered) {
char *pBlockData = pBlocks->data;
qsort(pBlockData, pBlocks->numOfRows, dataBuf->rowSize, rowDataCompar);
+ dataBuf->ordered = true;
- int32_t i = 0;
- int32_t j = 1;
-
- while (j < pBlocks->numOfRows) {
- TSKEY ti = *(TSKEY *)(pBlockData + dataBuf->rowSize * i);
- TSKEY tj = *(TSKEY *)(pBlockData + dataBuf->rowSize * j);
+ if(tsClientMerge) {
+ int32_t i = 0;
+ int32_t j = 1;
+ while (j < pBlocks->numOfRows) {
+ TSKEY ti = *(TSKEY *)(pBlockData + dataBuf->rowSize * i);
+ TSKEY tj = *(TSKEY *)(pBlockData + dataBuf->rowSize * j);
- if (ti == tj) {
- if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
- memmove(pBlockData + dataBuf->rowSize * i, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
+ if (ti == tj) {
+ if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
+ memmove(pBlockData + dataBuf->rowSize * i, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
+ }
+ ++j;
+ continue;
}
+ int32_t nextPos = (++i);
+ if (nextPos != j) {
+ memmove(pBlockData + dataBuf->rowSize * nextPos, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
+ }
++j;
- continue;
- }
-
- int32_t nextPos = (++i);
- if (nextPos != j) {
- memmove(pBlockData + dataBuf->rowSize * nextPos, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
- }
-
- ++j;
+ }
+ pBlocks->numOfRows = i + 1;
+ dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
-
- dataBuf->ordered = true;
-
- pBlocks->numOfRows = i + 1;
- dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
dataBuf->prevTS = INT64_MIN;
@@ -836,32 +833,33 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
if (!dataBuf->ordered) {
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar);
+ dataBuf->ordered = true;
- pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
- int32_t i = 0;
- int32_t j = 1;
- while (j < nRows) {
- TSKEY ti = (pBlkKeyTuple + i)->skey;
- TSKEY tj = (pBlkKeyTuple + j)->skey;
-
- if (ti == tj) {
- if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
- memmove(pBlkKeyTuple + i, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
+ if(tsClientMerge) {
+ pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
+ int32_t i = 0;
+ int32_t j = 1;
+ while (j < nRows) {
+ TSKEY ti = (pBlkKeyTuple + i)->skey;
+ TSKEY tj = (pBlkKeyTuple + j)->skey;
+
+ if (ti == tj) {
+ if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
+ memmove(pBlkKeyTuple + i, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
+ }
+
+ ++j;
+ continue;
}
+ int32_t nextPos = (++i);
+ if (nextPos != j) {
+ memmove(pBlkKeyTuple + nextPos, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
+ }
++j;
- continue;
- }
-
- int32_t nextPos = (++i);
- if (nextPos != j) {
- memmove(pBlkKeyTuple + nextPos, pBlkKeyTuple + j, sizeof(SBlockKeyTuple));
}
- ++j;
- }
-
- dataBuf->ordered = true;
- pBlocks->numOfRows = i + 1;
+ pBlocks->numOfRows = i + 1;
+ }
}
dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize;
@@ -1251,10 +1249,18 @@ static int32_t parseBoundColumns(SInsertStatementParam *pInsertParam, SParsedDat
sToken = tStrGetToken(str, &index, false);
str += index;
+ char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character backstick(`)
+ strncpy(tmpTokenBuf, sToken.z, sToken.n);
+ sToken.z = tmpTokenBuf;
+
if (TK_STRING == sToken.type) {
tscDequoteAndTrimToken(&sToken);
}
+ if (TK_ID == sToken.type) {
+ tscRmEscapeAndTrimToken(&sToken);
+ }
+
if (sToken.type == TK_RP) {
if (end != NULL) { // set the end position
*end = str;
diff --git a/src/client/src/tscParseLineProtocol.c b/src/client/src/tscParseLineProtocol.c
index 6354ba9e9fe60758dc5b8ddafa618033a4d0ffa1..af57f7ec8c6c192bf84915abd86728ab8f195835 100644
--- a/src/client/src/tscParseLineProtocol.c
+++ b/src/client/src/tscParseLineProtocol.c
@@ -20,7 +20,7 @@
#include "tscParseLine.h"
typedef struct {
- char sTableName[TSDB_TABLE_NAME_LEN];
+ char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
SHashObj* tagHash;
SHashObj* fieldHash;
SArray* tags; //SArray
@@ -64,13 +64,13 @@ typedef enum {
} ESchemaAction;
typedef struct {
- char sTableName[TSDB_TABLE_NAME_LEN];
+ char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
SArray* tags; //SArray
SArray* fields; //SArray
} SCreateSTableActionInfo;
typedef struct {
- char sTableName[TSDB_TABLE_NAME_LEN];
+ char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
SSchema* field;
} SAlterSTableActionInfo;
@@ -155,13 +155,13 @@ static int32_t getSmlMd5ChildTableName(TAOS_SML_DATA_POINT* point, char* tableNa
qsort(point->tags, point->tagNum, sizeof(TAOS_SML_KV), compareSmlColKv);
SStringBuilder sb; memset(&sb, 0, sizeof(sb));
- char sTableName[TSDB_TABLE_NAME_LEN] = {0};
+ char sTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
strtolower(sTableName, point->stableName);
taosStringBuilderAppendString(&sb, sTableName);
for (int j = 0; j < point->tagNum; ++j) {
taosStringBuilderAppendChar(&sb, ',');
TAOS_SML_KV* tagKv = point->tags + j;
- char tagName[TSDB_COL_NAME_LEN] = {0};
+ char tagName[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
strtolower(tagName, tagKv->key);
taosStringBuilderAppendString(&sb, tagName);
taosStringBuilderAppendChar(&sb, '=');
@@ -214,8 +214,8 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
for (int j = 0; j < point->tagNum; ++j) {
TAOS_SML_KV* tagKv = point->tags + j;
if (!point->childTableName) {
- char childTableName[TSDB_TABLE_NAME_LEN];
- int32_t tableNameLen = TSDB_TABLE_NAME_LEN;
+ char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
+ int32_t tableNameLen = TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE;
getSmlMd5ChildTableName(point, childTableName, &tableNameLen, info);
point->childTableName = calloc(1, tableNameLen+1);
strncpy(point->childTableName, childTableName, tableNameLen);
@@ -261,7 +261,7 @@ static int32_t buildDataPointSchemas(TAOS_SML_DATA_POINT* points, int numPoint,
static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash, SArray* dbAttrArray, bool isTag, char sTableName[],
SSchemaAction* action, bool* actionNeeded, SSmlLinesInfo* info) {
- char fieldNameLowerCase[TSDB_COL_NAME_LEN] = {0};
+ char fieldNameLowerCase[TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
strtolower(fieldNameLowerCase, pointColField->name);
size_t* pDbIndex = taosHashGet(dbAttrHash, fieldNameLowerCase, strlen(fieldNameLowerCase));
@@ -281,7 +281,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash
action->action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
}
memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
- memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN);
+ memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
action->alterSTable.field = pointColField;
*actionNeeded = true;
}
@@ -292,7 +292,7 @@ static int32_t generateSchemaAction(SSchema* pointColField, SHashObj* dbAttrHash
action->action = SCHEMA_ACTION_ADD_COLUMN;
}
memset(&action->alterSTable, 0, sizeof(SAlterSTableActionInfo));
- memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN);
+ memcpy(action->alterSTable.sTableName, sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
action->alterSTable.field = pointColField;
*actionNeeded = true;
}
@@ -499,6 +499,7 @@ static int32_t fillDbSchema(STableMeta* tableMeta, char* tableName, SSmlSTableSc
for (int i=0; itableInfo.numOfColumns; ++i) {
SSchema field;
tstrncpy(field.name, tableMeta->schema[i].name, strlen(tableMeta->schema[i].name)+1);
+ addEscapeCharToString(field.name, (int16_t)strlen(field.name));
field.type = tableMeta->schema[i].type;
field.bytes = tableMeta->schema[i].bytes;
taosArrayPush(schema->fields, &field);
@@ -510,6 +511,7 @@ static int32_t fillDbSchema(STableMeta* tableMeta, char* tableName, SSmlSTableSc
int j = i + tableMeta->tableInfo.numOfColumns;
SSchema field;
tstrncpy(field.name, tableMeta->schema[j].name, strlen(tableMeta->schema[j].name)+1);
+ addEscapeCharToString(field.name, (int16_t)strlen(field.name));
field.type = tableMeta->schema[j].type;
field.bytes = tableMeta->schema[j].bytes;
taosArrayPush(schema->tags, &field);
@@ -534,7 +536,7 @@ static int32_t retrieveTableMeta(TAOS* taos, char* tableName, STableMeta** pTabl
tscDebug("SML:0x%" PRIx64 " retrieve table meta. super table name: %s", info->id, tableName);
- char tableNameLowerCase[TSDB_TABLE_NAME_LEN];
+ char tableNameLowerCase[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE];
strtolower(tableNameLowerCase, tableName);
char sql[256];
@@ -621,7 +623,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
SSchemaAction schemaAction = {0};
schemaAction.action = SCHEMA_ACTION_CREATE_STABLE;
memset(&schemaAction.createSTable, 0, sizeof(SCreateSTableActionInfo));
- memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN);
+ memcpy(schemaAction.createSTable.sTableName, pointSchema->sTableName, TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
schemaAction.createSTable.tags = pointSchema->tags;
schemaAction.createSTable.fields = pointSchema->fields;
applySchemaAction(taos, &schemaAction, info);
@@ -629,11 +631,11 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
if (code != 0) {
tscError("SML:0x%"PRIx64" reconcile point schema failed. can not create %s", info->id, pointSchema->sTableName);
return code;
- } else {
- pointSchema->precision = dbSchema.precision;
- destroySmlSTableSchema(&dbSchema);
}
- } else if (code == TSDB_CODE_SUCCESS) {
+ }
+
+ if (code == TSDB_CODE_SUCCESS) {
+ pointSchema->precision = dbSchema.precision;
size_t pointTagSize = taosArrayGetSize(pointSchema->tags);
size_t pointFieldSize = taosArrayGetSize(pointSchema->fields);
@@ -657,7 +659,7 @@ static int32_t modifyDBSchemas(TAOS* taos, SArray* stableSchemas, SSmlLinesInfo*
SSchema* pointColTs = taosArrayGet(pointSchema->fields, 0);
SSchema* dbColTs = taosArrayGet(dbSchema.fields, 0);
- memcpy(pointColTs->name, dbColTs->name, TSDB_COL_NAME_LEN);
+ memcpy(pointColTs->name, dbColTs->name, TSDB_COL_NAME_LEN + TS_ESCAPE_CHAR_SIZE);
for (int j = 1; j < pointFieldSize; ++j) {
SSchema* pointCol = taosArrayGet(pointSchema->fields, j);
@@ -1175,6 +1177,16 @@ static void escapeSpecialCharacter(uint8_t field, const char **pos) {
*pos = cur;
}
+char* addEscapeCharToString(char *str, int32_t len) {
+ if (str == NULL) {
+ return NULL;
+ }
+ memmove(str + 1, str, len);
+ str[0] = str[len + 1] = TS_ESCAPE_CHAR;
+ str[len + 2] = '\0';
+ return str;
+}
+
bool isValidInteger(char *str) {
char *c = str;
if (*c != '+' && *c != '-' && !isdigit(*c)) {
@@ -1435,67 +1447,74 @@ static bool isNchar(char *pVal, uint16_t len) {
return false;
}
-static bool isTimeStamp(char *pVal, uint16_t len, SMLTimeStampType *tsType, SSmlLinesInfo* info) {
+static int32_t isTimeStamp(char *pVal, uint16_t len, SMLTimeStampType *tsType, SSmlLinesInfo* info) {
if (len == 0) {
- return true;
+ return TSDB_CODE_SUCCESS;
}
if ((len == 1) && pVal[0] == '0') {
*tsType = SML_TIME_STAMP_NOW;
- return true;
+ return TSDB_CODE_SUCCESS;
}
- //Default no appendix
- if (isdigit(pVal[len - 1]) && isdigit(pVal[len - 2])) {
- if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
- if (info->tsType != SML_TIME_STAMP_NOT_CONFIGURED) {
- *tsType = info->tsType;
- } else {
- *tsType = SML_TIME_STAMP_NANO_SECONDS;
- }
- } else if (info->protocol == TSDB_SML_TELNET_PROTOCOL) {
- if (len == SML_TIMESTAMP_SECOND_DIGITS) {
- *tsType = SML_TIME_STAMP_SECONDS;
- } else if (len == SML_TIMESTAMP_MILLI_SECOND_DIGITS) {
- *tsType = SML_TIME_STAMP_MILLI_SECONDS;
- } else {
- return TSDB_CODE_TSC_INVALID_TIME_STAMP;
- }
+ for (int i = 0; i < len; ++i) {
+ if(!isdigit(pVal[i])) {
+ return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
- return true;
}
- if (pVal[len - 1] == 's') {
- switch (pVal[len - 2]) {
- case 'm':
- *tsType = SML_TIME_STAMP_MILLI_SECONDS;
- break;
- case 'u':
- *tsType = SML_TIME_STAMP_MICRO_SECONDS;
- break;
- case 'n':
- *tsType = SML_TIME_STAMP_NANO_SECONDS;
- break;
- default:
- if (isdigit(pVal[len - 2])) {
- *tsType = SML_TIME_STAMP_SECONDS;
- break;
- } else {
- return false;
- }
+ /* For InfluxDB line protocol use user passed timestamp precision
+ * For OpenTSDB protocols only 10 digit(seconds) or 13 digits(milliseconds)
+ * precision allowed
+ */
+ if (info->protocol == TSDB_SML_LINE_PROTOCOL) {
+ if (info->tsType != SML_TIME_STAMP_NOT_CONFIGURED) {
+ *tsType = info->tsType;
+ } else {
+ *tsType = SML_TIME_STAMP_NANO_SECONDS;
+ }
+ } else if (info->protocol == TSDB_SML_TELNET_PROTOCOL) {
+ if (len == SML_TIMESTAMP_SECOND_DIGITS) {
+ *tsType = SML_TIME_STAMP_SECONDS;
+ } else if (len == SML_TIMESTAMP_MILLI_SECOND_DIGITS) {
+ *tsType = SML_TIME_STAMP_MILLI_SECONDS;
+ } else {
+ return TSDB_CODE_TSC_INVALID_TIME_STAMP;
}
- //printf("Type is timestamp(%s)\n", pVal);
- return true;
}
- return false;
+ return TSDB_CODE_SUCCESS;
+
+ //if (pVal[len - 1] == 's') {
+ // switch (pVal[len - 2]) {
+ // case 'm':
+ // *tsType = SML_TIME_STAMP_MILLI_SECONDS;
+ // break;
+ // case 'u':
+ // *tsType = SML_TIME_STAMP_MICRO_SECONDS;
+ // break;
+ // case 'n':
+ // *tsType = SML_TIME_STAMP_NANO_SECONDS;
+ // break;
+ // default:
+ // if (isdigit(pVal[len - 2])) {
+ // *tsType = SML_TIME_STAMP_SECONDS;
+ // break;
+ // } else {
+ // return false;
+ // }
+ // }
+ // //printf("Type is timestamp(%s)\n", pVal);
+ // return true;
+ //}
+ //return false;
}
static bool convertStrToNumber(TAOS_SML_KV *pVal, char *str, SSmlLinesInfo* info) {
errno = 0;
uint8_t type = pVal->type;
int16_t length = pVal->length;
- int64_t val_s;
- uint64_t val_u;
- double val_d;
+ int64_t val_s = 0;
+ uint64_t val_u = 0;
+ double val_d = 0.0;
strntolower_s(str, str, (int32_t)strlen(str));
if (IS_FLOAT_TYPE(type)) {
@@ -1750,14 +1769,6 @@ bool convertSmlValueType(TAOS_SML_KV *pVal, char *value,
static int32_t getTimeStampValue(char *value, uint16_t len,
SMLTimeStampType type, int64_t *ts, SSmlLinesInfo* info) {
- if (len >= 2) {
- for (int i = 0; i < len - 2; ++i) {
- if(!isdigit(value[i])) {
- return TSDB_CODE_TSC_INVALID_TIME_STAMP;
- }
- }
- }
-
//No appendix or no timestamp given (len = 0)
if (len != 0 && type != SML_TIME_STAMP_NOW) {
*ts = (int64_t)strtoll(value, NULL, 10);
@@ -1803,16 +1814,16 @@ static int32_t getTimeStampValue(char *value, uint16_t len,
int32_t convertSmlTimeStamp(TAOS_SML_KV *pVal, char *value,
uint16_t len, SSmlLinesInfo* info) {
int32_t ret;
- SMLTimeStampType type;
+ SMLTimeStampType type = SML_TIME_STAMP_NOW;
int64_t tsVal;
- strntolower_s(value, value, len);
- if (!isTimeStamp(value, len, &type, info)) {
- return TSDB_CODE_TSC_INVALID_TIME_STAMP;
+ ret = isTimeStamp(value, len, &type, info);
+ if (ret != TSDB_CODE_SUCCESS) {
+ return ret;
}
ret = getTimeStampValue(value, len, type, &tsVal, info);
- if (ret) {
+ if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
tscDebug("SML:0x%"PRIx64"Timestamp after conversion:%"PRId64, info->id, tsVal);
@@ -1884,15 +1895,10 @@ bool checkDuplicateKey(char *key, SHashObj *pHash, SSmlLinesInfo* info) {
static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash, SSmlLinesInfo* info) {
const char *cur = *index;
char key[TSDB_COL_NAME_LEN + 1]; // +1 to avoid key[len] over write
- uint16_t len = 0;
+ int16_t len = 0;
- //key field cannot start with digit
- if (isdigit(*cur)) {
- tscError("SML:0x%"PRIx64" Tag key cannot start with digit", info->id);
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
- }
while (*cur != '\0') {
- if (len >= TSDB_COL_NAME_LEN - 1) {
+ if (len > TSDB_COL_NAME_LEN - 1) {
tscError("SML:0x%"PRIx64" Key field cannot exceeds %d characters", info->id, TSDB_COL_NAME_LEN - 1);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
@@ -1902,8 +1908,6 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
}
//Escape special character
if (*cur == '\\') {
- //TODO: escape will work after column & tag
- //support spcial characters
escapeSpecialCharacter(2, &cur);
}
key[len] = *cur;
@@ -1919,9 +1923,11 @@ static int32_t parseSmlKey(TAOS_SML_KV *pKV, const char **index, SHashObj *pHash
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
- pKV->key = calloc(len + 1, 1);
+ pKV->key = calloc(len + TS_ESCAPE_CHAR_SIZE + 1, 1);
memcpy(pKV->key, key, len + 1);
- //tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
+ strntolower_s(pKV->key, pKV->key, (int32_t)len);
+ addEscapeCharToString(pKV->key, len);
+ tscDebug("SML:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
*index = cur + 1;
return TSDB_CODE_SUCCESS;
}
@@ -1932,7 +1938,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
const char *start, *cur;
int32_t ret = TSDB_CODE_SUCCESS;
char *value = NULL;
- uint16_t len = 0;
+ int16_t len = 0;
bool searchQuote = false;
start = cur = *index;
@@ -1978,6 +1984,7 @@ static int32_t parseSmlValue(TAOS_SML_KV *pKV, const char **index,
//Escape special character
if (*cur == '\\') {
escapeSpecialCharacter(isTag ? 2 : 3, &cur);
+ len++;
}
cur++;
len++;
@@ -2013,21 +2020,15 @@ error:
static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index,
uint8_t *has_tags, SSmlLinesInfo* info) {
const char *cur = *index;
- uint16_t len = 0;
+ int16_t len = 0;
- pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + 1, 1); // +1 to avoid 1772 line over write
+ pSml->stableName = calloc(TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE, 1);
if (pSml->stableName == NULL){
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- if (isdigit(*cur)) {
- tscError("SML:0x%"PRIx64" Measurement field cannnot start with digit", info->id);
- free(pSml->stableName);
- pSml->stableName = NULL;
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
- }
while (*cur != '\0') {
- if (len >= TSDB_TABLE_NAME_LEN - 1) {
+ if (len > TSDB_TABLE_NAME_LEN - 1) {
tscError("SML:0x%"PRIx64" Measurement field cannot exceeds %d characters", info->id, TSDB_TABLE_NAME_LEN - 1);
free(pSml->stableName);
pSml->stableName = NULL;
@@ -2061,7 +2062,7 @@ static int32_t parseSmlMeasurement(TAOS_SML_DATA_POINT *pSml, const char **index
pSml->stableName = NULL;
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
- pSml->stableName[len] = '\0';
+ addEscapeCharToString(pSml->stableName, len);
*index = cur + 1;
tscDebug("SML:0x%"PRIx64" Stable name in measurement:%s|len:%d", info->id, pSml->stableName, len);
@@ -2106,6 +2107,13 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
pkv = *pKVs;
}
+ size_t childTableNameLen = strlen(tsSmlChildTableName);
+ char childTableName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
+ if (childTableNameLen != 0) {
+ memcpy(childTableName, tsSmlChildTableName, childTableNameLen);
+ addEscapeCharToString(childTableName, (int32_t)(childTableNameLen));
+ }
+
while (*cur != '\0') {
ret = parseSmlKey(pkv, &cur, pHash, info);
if (ret) {
@@ -2117,17 +2125,12 @@ static int32_t parseSmlKvPairs(TAOS_SML_KV **pKVs, int *num_kvs,
tscError("SML:0x%"PRIx64" Unable to parse value", info->id);
goto error;
}
- if (!isField && (strcasecmp(pkv->key, "ID") == 0)) {
- ret = isValidChildTableName(pkv->value, pkv->length, info);
- if (ret) {
- free(pkv->key);
- free(pkv->value);
- goto error;
- }
- smlData->childTableName = malloc( pkv->length + 1);
+
+ if (!isField && childTableNameLen != 0 && strcasecmp(pkv->key, childTableName) == 0) {
+ smlData->childTableName = malloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1);
memcpy(smlData->childTableName, pkv->value, pkv->length);
strntolower_s(smlData->childTableName, smlData->childTableName, (int32_t)pkv->length);
- smlData->childTableName[pkv->length] = '\0';
+ addEscapeCharToString(smlData->childTableName, (int32_t)pkv->length);
free(pkv->key);
free(pkv->value);
} else {
@@ -2373,6 +2376,7 @@ static SSqlObj* createSmlQueryObj(TAOS* taos, int32_t affected_rows, int32_t cod
}
pNew->signature = pNew;
pNew->pTscObj = taos;
+ pNew->fp = NULL;
tsem_init(&pNew->rspSem, 0, 0);
registerSqlObj(pNew);
@@ -2409,7 +2413,7 @@ static SSqlObj* createSmlQueryObj(TAOS* taos, int32_t affected_rows, int32_t cod
TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int protocol, int precision) {
int code = TSDB_CODE_SUCCESS;
int affected_rows = 0;
- SMLTimeStampType tsType;
+ SMLTimeStampType tsType = SML_TIME_STAMP_NOW;
if (protocol == TSDB_SML_LINE_PROTOCOL) {
code = convertPrecisionType(precision, &tsType);
diff --git a/src/client/src/tscParseOpenTSDB.c b/src/client/src/tscParseOpenTSDB.c
index decef4887819f1467d8345e2f021bc7bc2286dfb..e78abf0596447df0ee58db88ca87b19011293c6c 100644
--- a/src/client/src/tscParseOpenTSDB.c
+++ b/src/client/src/tscParseOpenTSDB.c
@@ -37,18 +37,20 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index,
const char *cur = *index;
uint16_t len = 0;
- pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN, 1);
+ pSml->stableName = tcalloc(TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE, 1);
if (pSml->stableName == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+ /*
if (isdigit(*cur)) {
tscError("OTD:0x%"PRIx64" Metric cannot start with digit", info->id);
tfree(pSml->stableName);
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
+ */
while (*cur != '\0') {
- if (len >= TSDB_TABLE_NAME_LEN - 1) {
+ if (len > TSDB_TABLE_NAME_LEN - 1) {
tscError("OTD:0x%"PRIx64" Metric cannot exceeds %d characters", info->id, TSDB_TABLE_NAME_LEN - 1);
tfree(pSml->stableName);
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
@@ -63,7 +65,7 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index,
}
}
- pSml->stableName[len] = *cur;
+ pSml->stableName[len] = tolower(*cur);
cur++;
len++;
@@ -73,7 +75,7 @@ static int32_t parseTelnetMetric(TAOS_SML_DATA_POINT *pSml, const char **index,
return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
}
- pSml->stableName[len] = '\0';
+ addEscapeCharToString(pSml->stableName, len);
*index = cur + 1;
tscDebug("OTD:0x%"PRIx64" Stable name in metric:%s|len:%d", info->id, pSml->stableName, len);
@@ -207,12 +209,12 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj
uint16_t len = 0;
//key field cannot start with digit
- if (isdigit(*cur)) {
- tscError("OTD:0x%"PRIx64" Tag key cannot start with digit", info->id);
- return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
- }
+ //if (isdigit(*cur)) {
+ // tscError("OTD:0x%"PRIx64" Tag key cannot start with digit", info->id);
+ // return TSDB_CODE_TSC_LINE_SYNTAX_ERROR;
+ //}
while (*cur != '\0') {
- if (len >= TSDB_COL_NAME_LEN - 1) {
+ if (len > TSDB_COL_NAME_LEN - 1) {
tscError("OTD:0x%"PRIx64" Tag key cannot exceeds %d characters", info->id, TSDB_COL_NAME_LEN - 1);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
@@ -236,8 +238,10 @@ static int32_t parseTelnetTagKey(TAOS_SML_KV *pKV, const char **index, SHashObj
return TSDB_CODE_TSC_DUP_TAG_NAMES;
}
- pKV->key = tcalloc(len + 1, 1);
+ pKV->key = tcalloc(len + TS_ESCAPE_CHAR_SIZE + 1, 1);
memcpy(pKV->key, key, len + 1);
+ strntolower_s(pKV->key, pKV->key, (int32_t)len);
+ addEscapeCharToString(pKV->key, len);
//tscDebug("OTD:0x%"PRIx64" Key:%s|len:%d", info->id, pKV->key, len);
*index = cur + 1;
return TSDB_CODE_SUCCESS;
@@ -301,6 +305,12 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
*pKVs = tcalloc(capacity, sizeof(TAOS_SML_KV));
pkv = *pKVs;
+ size_t childTableNameLen = strlen(tsSmlChildTableName);
+ char childTbName[TSDB_TABLE_NAME_LEN + TS_ESCAPE_CHAR_SIZE] = {0};
+ if (childTableNameLen != 0) {
+ memcpy(childTbName, tsSmlChildTableName, childTableNameLen);
+ addEscapeCharToString(childTbName, (int32_t)(childTableNameLen));
+ }
while (*cur != '\0') {
ret = parseTelnetTagKey(pkv, &cur, pHash, info);
if (ret) {
@@ -312,15 +322,12 @@ static int32_t parseTelnetTagKvs(TAOS_SML_KV **pKVs, int *num_kvs,
tscError("OTD:0x%"PRIx64" Unable to parse value", info->id);
return ret;
}
- if ((strcasecmp(pkv->key, "ID") == 0)) {
- ret = isValidChildTableName(pkv->value, pkv->length, info);
- if (ret) {
- return ret;
- }
- *childTableName = malloc(pkv->length + 1);
+ if (childTableNameLen != 0 && strcasecmp(pkv->key, childTbName) == 0) {
+ *childTableName = tcalloc(pkv->length + TS_ESCAPE_CHAR_SIZE + 1, 1);
memcpy(*childTableName, pkv->value, pkv->length);
(*childTableName)[pkv->length] = '\0';
strntolower_s(*childTableName, *childTableName, (int32_t)pkv->length);
+ addEscapeCharToString(*childTableName, pkv->length);
tfree(pkv->key);
tfree(pkv->value);
} else {
@@ -493,19 +500,22 @@ static int32_t parseMetricFromJSON(cJSON *root, TAOS_SML_DATA_POINT* pSml, SSmlL
return TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH;
}
- pSml->stableName = tcalloc(stableLen + 1, sizeof(char));
+ pSml->stableName = tcalloc(stableLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
if (pSml->stableName == NULL){
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+ /*
if (isdigit(metric->valuestring[0])) {
tscError("OTD:0x%"PRIx64" Metric cannot start with digit in JSON", info->id);
tfree(pSml->stableName);
return TSDB_CODE_TSC_INVALID_JSON;
}
+ */
tstrncpy(pSml->stableName, metric->valuestring, stableLen + 1);
strntolower_s(pSml->stableName, pSml->stableName, (int32_t)stableLen);
+ addEscapeCharToString(pSml->stableName, (int32_t)stableLen);
return TSDB_CODE_SUCCESS;
@@ -889,27 +899,29 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs,
return TSDB_CODE_TSC_INVALID_JSON;
}
- //only pick up the first ID value as child table name
- cJSON *id = cJSON_GetObjectItem(tags, "ID");
- if (id != NULL) {
- if (!cJSON_IsString(id)) {
- tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id);
- return TSDB_CODE_TSC_INVALID_JSON;
- }
- size_t idLen = strlen(id->valuestring);
- ret = isValidChildTableName(id->valuestring, (int16_t)idLen, info);
- if (ret != TSDB_CODE_SUCCESS) {
- return ret;
- }
- *childTableName = tcalloc(idLen + 1, sizeof(char));
- memcpy(*childTableName, id->valuestring, idLen);
- strntolower_s(*childTableName, *childTableName, (int32_t)idLen);
-
- //check duplicate IDs
- cJSON_DeleteItemFromObject(tags, "ID");
- id = cJSON_GetObjectItem(tags, "ID");
+ //handle child table name
+ size_t childTableNameLen = strlen(tsSmlChildTableName);
+ char childTbName[TSDB_TABLE_NAME_LEN] = {0};
+ if (childTableNameLen != 0) {
+ memcpy(childTbName, tsSmlChildTableName, childTableNameLen);
+ cJSON *id = cJSON_GetObjectItem(tags, childTbName);
if (id != NULL) {
- return TSDB_CODE_TSC_DUP_TAG_NAMES;
+ if (!cJSON_IsString(id)) {
+ tscError("OTD:0x%"PRIx64" ID must be JSON string", info->id);
+ return TSDB_CODE_TSC_INVALID_JSON;
+ }
+ size_t idLen = strlen(id->valuestring);
+ *childTableName = tcalloc(idLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
+ memcpy(*childTableName, id->valuestring, idLen);
+ strntolower_s(*childTableName, *childTableName, (int32_t)idLen);
+ addEscapeCharToString(*childTableName, (int32_t)idLen);
+
+ //check duplicate IDs
+ cJSON_DeleteItemFromObject(tags, childTbName);
+ id = cJSON_GetObjectItem(tags, childTbName);
+ if (id != NULL) {
+ return TSDB_CODE_TSC_DUP_TAG_NAMES;
+ }
}
}
@@ -938,8 +950,10 @@ static int32_t parseTagsFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *num_kvs,
tscError("OTD:0x%"PRIx64" Tag key cannot exceeds %d characters in JSON", info->id, TSDB_COL_NAME_LEN - 1);
return TSDB_CODE_TSC_INVALID_COLUMN_LENGTH;
}
- pkv->key = tcalloc(keyLen + 1, sizeof(char));
+ pkv->key = tcalloc(keyLen + TS_ESCAPE_CHAR_SIZE + 1, sizeof(char));
strncpy(pkv->key, tag->string, keyLen);
+ strntolower_s(pkv->key, pkv->key, (int32_t)keyLen);
+ addEscapeCharToString(pkv->key, (int32_t)keyLen);
//value
ret = parseValueFromJSON(tag, pkv, info);
if (ret != TSDB_CODE_SUCCESS) {
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 6f0911c7542c1d508a8eac303ac7f78206c831aa..b82bf7bbd520d0649390715952515bdc1f418eae 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -2454,6 +2454,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
const char* msg12 = "parameter is out of range [1, 100]";
const char* msg13 = "parameter list required";
const char* msg14 = "third parameter algorithm must be 'default' or 't-digest'";
+ const char* msg15 = "parameter is out of range [1, 1000]";
switch (functionId) {
case TSDB_FUNC_COUNT: {
@@ -2901,11 +2902,15 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col
}
}
} else if (functionId == TSDB_FUNC_MAVG || functionId == TSDB_FUNC_SAMPLE) {
+ if (pVariant->nType != TSDB_DATA_TYPE_BIGINT) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+ }
+
tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true);
- int64_t numRowsSelected = GET_INT32_VAL(val);
+ int64_t numRowsSelected = GET_INT64_VAL(val);
if (numRowsSelected <= 0 || numRowsSelected > 1000) {
- return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg12);
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg15);
}
// todo REFACTOR
@@ -3174,6 +3179,14 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SStrToken
int16_t columnIndex = COLUMN_INDEX_INITIAL_VAL;
+ char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // create tmp buf to avoid altering original sqlstr
+ strncpy(tmpTokenBuf, pToken->z, pToken->n);
+ pToken->z = tmpTokenBuf;
+
+ if (pToken->type == TK_ID) {
+ tscRmEscapeAndTrimToken(pToken);
+ }
+
for (int16_t i = 0; i < numOfCols; ++i) {
if (pToken->n != strlen(pSchema[i].name)) {
continue;
@@ -4515,10 +4528,8 @@ static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t
// check for match expression
static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) {
const char* msg1 = "regular expression string should be less than %d characters";
- const char* msg2 = "illegal column type for match/nmatch";
const char* msg3 = "invalid regular expression";
- tSqlExpr* pLeft = pExpr->pLeft;
tSqlExpr* pRight = pExpr->pRight;
if (pExpr->tokenId == TK_MATCH || pExpr->tokenId == TK_NMATCH) {
@@ -4528,11 +4539,6 @@ static int32_t validateMatchExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_
return invalidOperationMsg(msgBuf, tmp);
}
- SSchema* pSchema = tscGetTableSchema(pTableMeta);
- if ((!isTablenameToken(&pLeft->columnName)) &&(pSchema[index].type != TSDB_DATA_TYPE_BINARY)) {
- return invalidOperationMsg(msgBuf, msg2);
- }
-
if (!(pRight->type == SQL_NODE_VALUE && pRight->value.nType == TSDB_DATA_TYPE_BINARY)) {
return invalidOperationMsg(msgBuf, msg3);
}
@@ -5021,6 +5027,7 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
const char* msg1 = "super table join requires tags column";
const char* msg2 = "timestamp join condition missing";
const char* msg3 = "condition missing for join query";
+ const char* msg4 = "only ts column join allowed";
if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
if (pQueryInfo->numOfTables == 1) {
@@ -5038,6 +5045,8 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
if (pCondExpr->pJoinExpr == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
+ } else if ((!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) && pCondExpr->pJoinExpr) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (!pCondExpr->tsJoin) {
@@ -5592,10 +5601,14 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
const char* msg6 = "not supported function now";
+ const char* msg7 = "join query not supported fill operation";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
+ if(QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
+ return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
+ }
/*
* fill options are set at the end position, when all columns are set properly
@@ -5740,6 +5753,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
const char* msg9 = "orderby column must projected in subquery";
const char* msg10 = "not support distinct mixed with order by";
const char* msg11 = "not support order with udf";
+ const char* msg12 = "order by tags not supported with diff/derivative/csum/mavg";
setDefaultOrderInfo(pQueryInfo);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -5782,7 +5796,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
bool udf = false;
if (pQueryInfo->pUdfInfo && taosArrayGetSize(pQueryInfo->pUdfInfo) > 0) {
- int32_t usize = taosArrayGetSize(pQueryInfo->pUdfInfo);
+ int32_t usize = (int32_t)taosArrayGetSize(pQueryInfo->pUdfInfo);
for (int32_t i = 0; i < usize; ++i) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, i);
@@ -5838,6 +5852,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
size_t s = taosArrayGetSize(pSortOrder);
if (s == 1) {
if (orderByTags) {
+ if (tscIsDiffDerivLikeQuery(pQueryInfo)) {
+ return invalidOperationMsg(pMsgBuf, msg12);
+ }
pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
@@ -5950,6 +5967,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg11);
}
+ if (udf) {
+ return invalidOperationMsg(pMsgBuf, msg11);
+ }
+
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
@@ -6313,6 +6334,13 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+ //handle Escape character backstick
+ if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) {
+ memmove(name.z, name.z + 1, name.n);
+ name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0';
+ name.n -= TS_ESCAPE_CHAR_SIZE;
+ }
+
if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsg, msg17);
}
@@ -6360,6 +6388,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SStrToken name = {.type = TK_STRING, .z = pItem->name, .n = (uint32_t)strlen(pItem->name)};
+ //handle Escape character backstick
+ if (name.z[0] == TS_ESCAPE_CHAR && name.z[name.n - 1] == TS_ESCAPE_CHAR) {
+ memmove(name.z, name.z + 1, name.n);
+ name.z[name.n - TS_ESCAPE_CHAR_SIZE] = '\0';
+ name.n -= TS_ESCAPE_CHAR_SIZE;
+ }
if (getColumnIndexByName(&name, pQueryInfo, &columnIndex, tscGetErrorMsgPayload(pCmd)) != TSDB_CODE_SUCCESS) {
return invalidOperationMsg(pMsg, msg17);
}
@@ -6646,6 +6680,9 @@ int32_t validateColumnName(char* name) {
}
return validateColumnName(token.z);
+ } else if (token.type == TK_ID) {
+ strRmquoteEscape(name, token.n);
+ return TSDB_CODE_SUCCESS;
} else {
if (isNumber(&token)) {
return TSDB_CODE_TSC_INVALID_OPERATION;
@@ -6781,16 +6818,21 @@ static int32_t setKeepOption(SSqlCmd* pCmd, SCreateDbMsg* pMsg, SCreateDbInfo* p
tVariantListItem* p1 = (s > 1) ? taosArrayGet(pKeep, 1) : p0;
tVariantListItem* p2 = (s > 2) ? taosArrayGet(pKeep, 2) : p1;
- if ((int32_t)p0->pVar.i64 <= 0 || (int32_t)p1->pVar.i64 <= 0 || (int32_t)p2->pVar.i64 <= 0) {
+ int32_t daysToKeep0 = (int32_t)p0->pVar.i64;
+ int32_t daysToKeep1 = (int32_t)p1->pVar.i64;
+ int32_t daysToKeep2 = (int32_t)p2->pVar.i64;
+ if (daysToKeep0 <= 0 || daysToKeep1 <= 0 || daysToKeep2 <= 0 ||
+ daysToKeep0 > TSDB_MAX_KEEP || daysToKeep1 > TSDB_MAX_KEEP || daysToKeep2 > TSDB_MAX_KEEP) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
- if (!(((int32_t)p0->pVar.i64 <= (int32_t)p1->pVar.i64) && ((int32_t)p1->pVar.i64 <= (int32_t)p2->pVar.i64))) {
+
+ if (!((daysToKeep0 <= daysToKeep1) && (daysToKeep1 <= daysToKeep2))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
- pMsg->daysToKeep0 = htonl((int32_t)p0->pVar.i64);
- pMsg->daysToKeep1 = htonl((int32_t)p1->pVar.i64);
- pMsg->daysToKeep2 = htonl((int32_t)p2->pVar.i64);
+ pMsg->daysToKeep0 = htonl(daysToKeep0);
+ pMsg->daysToKeep1 = htonl(daysToKeep1);
+ pMsg->daysToKeep2 = htonl(daysToKeep2);
}
@@ -7306,8 +7348,10 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, char*
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
// check if all the tags prj columns belongs to the group by columns
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
- // It is a groupby aggregate query, the tag project function is not suitable for this case.
- updateTagPrjFunction(pQueryInfo);
+ if (!tscIsDiffDerivLikeQuery(pQueryInfo)) {
+ // It is a groupby aggregate query, the tag project function is not suitable for this case.
+ updateTagPrjFunction(pQueryInfo);
+ }
return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
}
@@ -7711,7 +7755,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
SCreatedTableInfo* pCreateTableInfo = taosArrayGet(pCreateTable->childTableInfo, j);
SStrToken* pToken = &pCreateTableInfo->stableName;
-
+
bool dbIncluded = false;
char buf[TSDB_TABLE_FNAME_LEN];
SStrToken sTblToken;
@@ -7771,10 +7815,19 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
for (int32_t i = 0; i < nameSize; ++i) {
SStrToken* sToken = taosArrayGet(pNameList, i);
+
+ char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // create tmp buf to avoid altering original sqlstr
+ strncpy(tmpTokenBuf, sToken->z, sToken->n);
+ sToken->z = tmpTokenBuf;
+
if (TK_STRING == sToken->type) {
tscDequoteAndTrimToken(sToken);
}
+ if (TK_ID == sToken->type) {
+ tscRmEscapeAndTrimToken(sToken);
+ }
+
tVariantListItem* pItem = taosArrayGet(pValList, i);
findColumnIndex = false;
@@ -8617,7 +8670,7 @@ int32_t loadAllTableMeta(SSqlObj* pSql, struct SSqlInfo* pInfo) {
for (int32_t j = 0; j < usize; ++j) {
SUdfInfo* pUdfInfo = taosArrayGet(pQueryInfo->pUdfInfo, j);
- int32_t len = strlen(pUdfInfo->name);
+ int32_t len = (int32_t)strlen(pUdfInfo->name);
if (len == t->n && strncasecmp(info.name, pUdfInfo->name, t->n) == 0) {
exist = 1;
break;
@@ -9181,6 +9234,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
+ pQueryInfo->groupbyTag = tscGroupbyTag(pQueryInfo);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index b19af46a0c7f191b84d1ea8658f13456624179c9..0eba04ffb2e500e0d7a0ab6f005a217b6027f41c 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -2955,7 +2955,8 @@ int32_t tscGetTableMetaImpl(SSqlObj* pSql, STableMetaInfo *pTableMetaInfo, bool
// in case of child table, here only get the
if (pMeta->tableType == TSDB_CHILD_TABLE) {
int32_t code = tscCreateTableMetaFromSTableMeta(pSql, &pTableMetaInfo->pTableMeta, name, &pTableMetaInfo->tableMetaCapacity, (STableMeta **)(&pSTMeta));
- pSql->pBuf = (void *)(pSTMeta);
+ pSql->pBuf = (void *)(pSTMeta);
+ pMeta = pTableMetaInfo->pTableMeta;
if (code != TSDB_CODE_SUCCESS) {
return getTableMetaFromMnode(pSql, pTableMetaInfo, autocreate);
}
diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c
index caddde3f088c8ea65743070563a093921c3d2b2d..89da3c5640c6523d4d2a816b8ae0293310c5830a 100644
--- a/src/client/src/tscSql.c
+++ b/src/client/src/tscSql.c
@@ -629,6 +629,10 @@ static bool hasAdditionalErrorInfo(int32_t code, SSqlCmd *pCmd) {
return false;
}
+ if (pCmd->payload == NULL) {
+ return false;
+ }
+
size_t len = strlen(pCmd->payload);
char *z = NULL;
@@ -777,6 +781,16 @@ bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col) {
return isNull(((char*) pSql->res.urow[col]) + row * pInfo->field.bytes, pInfo->field.type);
}
+bool taos_is_update_query(TAOS_RES *res) {
+ SSqlObj *pSql = (SSqlObj *)res;
+ if (pSql == NULL || pSql->signature != pSql) {
+ return false;
+ }
+
+ SSqlCmd* pCmd = &pSql->cmd;
+ return ((pCmd->command >= TSDB_SQL_INSERT && pCmd->command <= TSDB_SQL_DROP_DNODE) || TSDB_SQL_RESET_CACHE == pCmd->command || TSDB_SQL_USE_DB == pCmd->command);
+}
+
int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) {
int len = 0;
@@ -905,7 +919,6 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
strtolower(pSql->sqlstr, sql);
-// pCmd->curSql = NULL;
if (NULL != pCmd->insertParam.pTableBlockHashList) {
taosHashCleanup(pCmd->insertParam.pTableBlockHashList);
pCmd->insertParam.pTableBlockHashList = NULL;
@@ -930,6 +943,17 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
return code;
}
+void taos_reset_current_db(TAOS *taos) {
+ STscObj* pObj = (STscObj*) taos;
+ if (pObj == NULL || pObj->signature != pObj) {
+ return;
+ }
+
+ pthread_mutex_lock(&pObj->mutex);
+ memset(pObj->db, 0, tListLen(pObj->db));
+ pthread_mutex_unlock(&pObj->mutex);
+}
+
void loadMultiTableMetaCallback(void *param, TAOS_RES *res, int code) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
if (pSql == NULL) {
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index 3f2d12e6d1be2517d98b83efaffb1125771597c1..73fdb02855e0bb0561630f87a2322385839698b1 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -752,7 +752,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *,
void taos_close_stream(TAOS_STREAM *handle) {
SSqlStream *pStream = (SSqlStream *)handle;
- SSqlObj *pSql = (SSqlObj *)atomic_exchange_ptr(&pStream->pSql, 0);
+ SSqlObj *pSql = pStream->pSql;
if (pSql == NULL) {
return;
}
@@ -763,13 +763,13 @@ void taos_close_stream(TAOS_STREAM *handle) {
*/
if (pSql->signature == pSql) {
tscRemoveFromStreamList(pStream, pSql);
+ pStream->pSql = NULL;
taosTmrStopA(&(pStream->pTimer));
tscDebug("0x%"PRIx64" stream:%p is closed", pSql->self, pStream);
// notify CQ to release the pStream object
pStream->fp(pStream->param, NULL, NULL);
- pStream->pSql = NULL;
taos_free_result(pSql);
tfree(pStream);
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 503bc1186b790036729d2914cd304a1c595b508b..f5702b4e35d69ced9bf024285f86dfb306900a60 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -2876,7 +2876,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
SSqlObj *userSql = pParentSql->rootObj;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
- if (userSql != pParentSql) {
+ if (userSql != pParentSql && pParentSql->freeParam != NULL) {
(*pParentSql->freeParam)(&pParentSql->param);
}
@@ -3729,6 +3729,25 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData;
}
+
+void tscSetQuerySort(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr) {
+ if (pQueryInfo->interval.interval <= 0) {
+ return;
+ }
+
+ if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
+ size_t size = taosArrayGetSize(pQueryInfo->pUpstream);
+ for(int32_t i = 0; i < size; ++i) {
+ SQueryInfo* pq = taosArrayGetP(pQueryInfo->pUpstream, i);
+ if (pq->groupbyTag && pq->interval.interval > 0) {
+ pQueryAttr->needSort = true;
+ return;
+ }
+ }
+ }
+}
+
+
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
char* sql, void* merger, int32_t stage, uint64_t qId) {
assert(pQueryInfo != NULL);
@@ -3831,6 +3850,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
SArray* pa = NULL;
if (stage == MASTER_SCAN) {
pQueryAttr->createFilterOperator = false; // no need for parent query
+ tscSetQuerySort(pQueryInfo, pQueryAttr);
pa = createExecOperatorPlan(pQueryAttr);
} else {
pa = createGlobalMergePlan(pQueryAttr);
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index b14c5af47a54e8917c97cec4d1e31acebda9f602..b03aef9e188b96d2bd3203720317a68f81c0a960 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -414,6 +414,19 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
+bool tscGroupbyTag(SQueryInfo* pQueryInfo) {
+ SGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
+ for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
+ SColIndex* pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
+ if (TSDB_COL_IS_TAG(pIndex->flag)) { // group by tag
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
@@ -1256,6 +1269,7 @@ static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t
}
*/
+
void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
SSqlRes* pOutput = &pSql->res;
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index a01c3775397e25849d9e8ff70409db7ac0af90ba..3a5b49e9eee004f8a93121653781c23b5fd347bf 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -139,7 +139,7 @@ typedef uint64_t TKEY;
#define tdGetTKEY(key) (((TKEY)ABS(key)) | (TKEY_NEGATIVE_FLAG & (TKEY)(key)))
#define tdGetKey(tkey) (((TSKEY)((tkey)&TKEY_VALUE_FILTER)) * (TKEY_IS_NEGATIVE(tkey) ? -1 : 1))
-#define MIN_TS_KEY ((TSKEY)0x8000000000000001)
+#define MIN_TS_KEY ((TSKEY)0xc000000000000001)
#define MAX_TS_KEY ((TSKEY)0x3fffffffffffffff)
#define TD_TO_TKEY(key) tdGetTKEY(((key) < MIN_TS_KEY) ? MIN_TS_KEY : (((key) > MAX_TS_KEY) ? MAX_TS_KEY : key))
@@ -339,7 +339,9 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
-int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
+
+int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints, int rowOffset);
+
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
@@ -670,7 +672,7 @@ static FORCE_INLINE char *memRowEnd(SMemRow row) {
#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))
SMemRow tdMemRowDup(SMemRow row);
-void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull);
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset);
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) {
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index c91637b1e85d54fb53c55e8fab09c666263345bf..bd201d980017522d0e32f6124290305d5b136f8d 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -46,7 +46,7 @@ extern int64_t tsDnodeStartTime;
// common
extern int tsRpcTimer;
extern int tsRpcMaxTime;
-extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
+extern int tsRpcForceTcp; // all commands go to tcp protocol if this is enabled
extern int32_t tsMaxConnections;
extern int32_t tsMaxShellConns;
extern int32_t tsShellActivityTimer;
@@ -57,19 +57,20 @@ extern float tsRatioOfQueryCores;
extern int8_t tsDaylight;
extern char tsTimezone[];
extern char tsLocale[];
-extern char tsCharset[]; // default encode string
+extern char tsCharset[]; // default encode string
extern int8_t tsEnableCoreFile;
extern int32_t tsCompressMsgSize;
extern int32_t tsCompressColData;
extern int32_t tsMaxNumOfDistinctResults;
extern char tsTempDir[];
-//query buffer management
-extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
-extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing
-extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked
+// query buffer management
+extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
+extern int64_t
+ tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node during query processing
+extern int32_t tsRetrieveBlockingModel; // retrieve threads will be blocked
-extern int8_t tsKeepOriginalColumnName;
+extern int8_t tsKeepOriginalColumnName;
// client
extern int32_t tsMaxSQLStringLen;
@@ -108,7 +109,7 @@ extern int32_t tsQuorum;
extern int8_t tsUpdate;
extern int8_t tsCacheLastRow;
-//tsdb
+// tsdb
extern bool tsdbForceKeepFile;
extern bool tsdbForceCompactFile;
extern int32_t tsdbWalFlushSize;
@@ -134,6 +135,7 @@ extern int8_t tsHttpEnableCompress;
extern int8_t tsHttpEnableRecordSql;
extern int8_t tsTelegrafUseFieldNum;
extern int8_t tsHttpDbNameMandatory;
+extern int32_t tsHttpKeepAlive;
// mqtt
extern int8_t tsEnableMqttModule;
@@ -170,22 +172,22 @@ extern int64_t tsTickPerDay[3];
extern int32_t tsTopicBianryLen;
// system info
-extern char tsOsName[];
-extern int64_t tsPageSize;
-extern int64_t tsOpenMax;
-extern int64_t tsStreamMax;
-extern int32_t tsNumOfCores;
-extern float tsTotalLogDirGB;
-extern float tsTotalTmpDirGB;
-extern float tsTotalDataDirGB;
-extern float tsAvailLogDirGB;
-extern float tsAvailTmpDirectorySpace;
-extern float tsAvailDataDirGB;
-extern float tsUsedDataDirGB;
-extern float tsMinimalLogDirGB;
-extern float tsReservedTmpDirectorySpace;
-extern float tsMinimalDataDirGB;
-extern int32_t tsTotalMemoryMB;
+extern char tsOsName[];
+extern int64_t tsPageSize;
+extern int64_t tsOpenMax;
+extern int64_t tsStreamMax;
+extern int32_t tsNumOfCores;
+extern float tsTotalLogDirGB;
+extern float tsTotalTmpDirGB;
+extern float tsTotalDataDirGB;
+extern float tsAvailLogDirGB;
+extern float tsAvailTmpDirectorySpace;
+extern float tsAvailDataDirGB;
+extern float tsUsedDataDirGB;
+extern float tsMinimalLogDirGB;
+extern float tsReservedTmpDirectorySpace;
+extern float tsMinimalDataDirGB;
+extern int32_t tsTotalMemoryMB;
extern uint32_t tsVersion;
// build info
@@ -196,41 +198,45 @@ extern char gitinfoOfInternal[];
extern char buildinfo[];
// log
-extern int8_t tsAsyncLog;
-extern int32_t tsNumOfLogLines;
-extern int32_t tsLogKeepDays;
-extern int32_t dDebugFlag;
-extern int32_t vDebugFlag;
-extern int32_t mDebugFlag;
+extern int8_t tsAsyncLog;
+extern int32_t tsNumOfLogLines;
+extern int32_t tsLogKeepDays;
+extern int32_t dDebugFlag;
+extern int32_t vDebugFlag;
+extern int32_t mDebugFlag;
extern uint32_t cDebugFlag;
-extern int32_t jniDebugFlag;
-extern int32_t tmrDebugFlag;
-extern int32_t sdbDebugFlag;
-extern int32_t httpDebugFlag;
-extern int32_t mqttDebugFlag;
-extern int32_t monDebugFlag;
-extern int32_t uDebugFlag;
-extern int32_t rpcDebugFlag;
-extern int32_t odbcDebugFlag;
+extern int32_t jniDebugFlag;
+extern int32_t tmrDebugFlag;
+extern int32_t sdbDebugFlag;
+extern int32_t httpDebugFlag;
+extern int32_t mqttDebugFlag;
+extern int32_t monDebugFlag;
+extern int32_t uDebugFlag;
+extern int32_t rpcDebugFlag;
+extern int32_t odbcDebugFlag;
extern uint32_t qDebugFlag;
-extern int32_t wDebugFlag;
-extern int32_t cqDebugFlag;
-extern int32_t debugFlag;
+extern int32_t wDebugFlag;
+extern int32_t cqDebugFlag;
+extern int32_t debugFlag;
+
+extern int8_t tsClientMerge;
#ifdef TD_TSZ
// lossy
-extern char lossyColumns[];
-extern double fPrecision;
-extern double dPrecision;
+extern char lossyColumns[];
+extern double fPrecision;
+extern double dPrecision;
extern uint32_t maxRange;
extern uint32_t curRange;
-extern char Compressor[];
+extern char Compressor[];
#endif
// long query
extern int8_t tsDeadLockKillQuery;
// schemaless
extern char tsDefaultJSONStrType[];
+extern char tsSmlChildTableName[];
+
typedef struct {
char dir[TSDB_FILENAME_LEN];
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 61378c79c4b5c44ffa11ae9132aa6f8b89ab5f71..bdb4e743a0db92074bdfd45431619019725be2c7 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -239,9 +239,12 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
pDataCol->len = 0;
}
-// value from timestamp should be TKEY here instead of TSKEY
-int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
- ASSERT(pCol != NULL && value != NULL);
+/**
+ * value from timestamp should be TKEY here instead of TSKEY.
+ * - rowOffset: 0 for current row, -1 for previous row
+ */
+int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints, int rowOffset) {
+ ASSERT(pCol != NULL && value != NULL && (rowOffset == 0 || rowOffset == -1));
if (isAllRowsNull(pCol)) {
if (isNull(value, pCol->type)) {
@@ -257,16 +260,29 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo
}
if (IS_VAR_DATA_TYPE(pCol->type)) {
- // set offset
- pCol->dataOff[numOfRows] = pCol->len;
- // Copy data
- memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
- // Update the length
- pCol->len += varDataTLen(value);
+ if (rowOffset == 0) {
+ // set offset
+ pCol->dataOff[numOfRows] = pCol->len;
+ // Copy data
+ memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, varDataTLen(value));
+ // Update the length
+ pCol->len += varDataTLen(value);
+ } else {
+ // Copy data
+ void *lastValue = POINTER_SHIFT(pCol->pData, pCol->dataOff[numOfRows]);
+ int lastValLen = varDataTLen(lastValue);
+ memcpy(lastValue, value, varDataTLen(value));
+ // Update the length
+ pCol->len -= lastValLen;
+ pCol->len += varDataTLen(value);
+ }
} else {
- ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
- memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
- pCol->len += pCol->bytes;
+    // rowOffset == 0 appends the value (pCol->len grows); rowOffset == -1 overwrites the last row's value without changing pCol->len or numOfRows (partial update)
+ ASSERT(pCol->len == (TYPE_BYTES[pCol->type] * (numOfRows - rowOffset)));
+ memcpy(POINTER_SHIFT(pCol->pData, (pCol->len + rowOffset * TYPE_BYTES[pCol->type])), value, pCol->bytes);
+ if (rowOffset == 0) {
+ pCol->len += pCol->bytes;
+ }
}
return 0;
}
@@ -441,7 +457,8 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
-static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull,
+ int rowOffset) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
@@ -451,7 +468,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
- dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
dcol++;
continue;
}
@@ -460,14 +477,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
if(!isNull(value, pDataCol->type)) setCol = 1;
- dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
+ dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
if(forceSetNull || setCol) {
- dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
dcol++;
}
@@ -475,7 +492,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
pCols->numOfRows++;
}
-static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));
int rcol = 0;
@@ -487,7 +504,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
- dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
++dcol;
continue;
}
@@ -497,14 +514,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
if(!isNull(value, pDataCol->type)) setCol = 1;
- dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
+ dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
if(forceSetNull || setCol) {
- dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
+ dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
++dcol;
}
@@ -512,11 +529,11 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
pCols->numOfRows++;
}
-void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
+void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset) {
if (isDataRow(row)) {
- tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull);
+ tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull, rowOffset);
} else if (isKvRow(row)) {
- tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull);
+ tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull, rowOffset);
} else {
ASSERT(0);
}
@@ -539,7 +556,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
for (int j = 0; j < source->numOfCols; j++) {
if (source->cols[j].len > 0 || target->cols[j].len > 0) {
dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
- target->maxPoints);
+ target->maxPoints, 0);
}
}
target->numOfRows++;
@@ -583,7 +600,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
ASSERT(target->cols[i].type == src1->cols[i].type);
if (src1->cols[i].len > 0 || target->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
- target->maxPoints);
+ target->maxPoints, 0);
}
}
@@ -595,10 +612,10 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
- target->maxPoints);
+ target->maxPoints, 0);
} else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
- target->maxPoints);
+ target->maxPoints, 0);
} else if(target->cols[i].len > 0) {
dataColSetNullAt(&target->cols[i], target->numOfRows);
}
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index f3ba69ec40d8ac76f8db0fc84667a1cf402bc4d0..c1a254b4ebd5fdfe1d29e02ab7cacbe3195058f1 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -14,18 +14,18 @@
*/
#define _DEFAULT_SOURCE
+#include "tglobal.h"
+#include "monitor.h"
#include "os.h"
#include "taosdef.h"
#include "taoserror.h"
-#include "tulog.h"
+#include "tcompare.h"
#include "tconfig.h"
-#include "tglobal.h"
-#include "monitor.h"
-#include "tsocket.h"
-#include "tutil.h"
#include "tlocale.h"
+#include "tsocket.h"
#include "ttimezone.h"
-#include "tcompare.h"
+#include "tulog.h"
+#include "tutil.h"
// cluster
char tsFirst[TSDB_EP_LEN] = {0};
@@ -49,16 +49,16 @@ int32_t tsDnodeId = 0;
int64_t tsDnodeStartTime = 0;
// common
-int32_t tsRpcTimer = 300;
-int32_t tsRpcMaxTime = 600; // seconds;
-int32_t tsRpcForceTcp = 0; //disable this, means query, show command use udp protocol as default
-int32_t tsMaxShellConns = 50000;
+int32_t tsRpcTimer = 300;
+int32_t tsRpcMaxTime = 600; // seconds;
+int32_t tsRpcForceTcp = 0; // disable this, means query, show command use udp protocol as default
+int32_t tsMaxShellConns = 50000;
int32_t tsMaxConnections = 5000;
-int32_t tsShellActivityTimer = 3; // second
+int32_t tsShellActivityTimer = 3; // second
float tsNumOfThreadsPerCore = 1.0f;
int32_t tsNumOfCommitThreads = 4;
float tsRatioOfQueryCores = 1.0f;
-int8_t tsDaylight = 0;
+int8_t tsDaylight = 0;
char tsTimezone[TSDB_TIMEZONE_LEN] = {0};
char tsLocale[TSDB_LOCALE_LEN] = {0};
char tsCharset[TSDB_LOCALE_LEN] = {0}; // default encode string
@@ -87,7 +87,7 @@ int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN;
int32_t tsMaxWildCardsLen = TSDB_PATTERN_STRING_DEFAULT_LEN;
int32_t tsMaxRegexStringLen = TSDB_REGEX_STRING_DEFAULT_LEN;
-int8_t tsTscEnableRecordSql = 0;
+int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for projection query on super table that are returned from
// one virtual node, to order according to timestamp
@@ -97,7 +97,7 @@ int32_t tsMaxNumOfOrderedResults = 1000000;
int32_t tsMinSlidingTime = 10;
// the maxinum number of distict query result
-int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
+int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 us for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
@@ -109,7 +109,7 @@ int32_t tsMaxStreamComputDelay = 20000;
int32_t tsStreamCompStartDelay = 10000;
// the stream computing delay time after executing failed, change accordingly
-int32_t tsRetryStreamCompDelay = 10*1000;
+int32_t tsRetryStreamCompDelay = 10 * 1000;
// The delayed computing ration. 10% of the whole computing time window by default.
float tsStreamComputDelayRatio = 0.1f;
@@ -128,41 +128,41 @@ int64_t tsQueryBufferSizeBytes = -1;
int32_t tsRetrieveBlockingModel = 0;
// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name
-int8_t tsKeepOriginalColumnName = 0;
+int8_t tsKeepOriginalColumnName = 0;
// db parameters
int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE;
int32_t tsBlocksPerVnode = TSDB_DEFAULT_TOTAL_BLOCKS;
-int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
-int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP;
+int16_t tsDaysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
+int32_t tsDaysToKeep = TSDB_DEFAULT_KEEP;
int32_t tsMinRowsInFileBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
int32_t tsMaxRowsInFileBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
-int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
+int16_t tsCommitTime = TSDB_DEFAULT_COMMIT_TIME; // seconds
int32_t tsTimePrecision = TSDB_DEFAULT_PRECISION;
-int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
-int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
-int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
-int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
-int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
-int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION;
-int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
-int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW;
-int32_t tsMaxVgroupsPerDb = 0;
+int8_t tsCompression = TSDB_DEFAULT_COMP_LEVEL;
+int8_t tsWAL = TSDB_DEFAULT_WAL_LEVEL;
+int32_t tsFsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
+int32_t tsReplications = TSDB_DEFAULT_DB_REPLICA_OPTION;
+int32_t tsQuorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
+int16_t tsPartitons = TSDB_DEFAULT_DB_PARTITON_OPTION;
+int8_t tsUpdate = TSDB_DEFAULT_DB_UPDATE_OPTION;
+int8_t tsCacheLastRow = TSDB_DEFAULT_CACHE_LAST_ROW;
+int32_t tsMaxVgroupsPerDb = 0;
int32_t tsMinTablePerVnode = TSDB_TABLES_STEP;
int32_t tsMaxTablePerVnode = TSDB_DEFAULT_TABLES;
int32_t tsTableIncStepPerVnode = TSDB_TABLES_STEP;
int32_t tsTsdbMetaCompactRatio = TSDB_META_COMPACT_RATIO;
-// tsdb config
+// tsdb config
// For backward compatibility
bool tsdbForceKeepFile = false;
-bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly
+bool tsdbForceCompactFile = false; // compact TSDB fileset forcibly
int32_t tsdbWalFlushSize = TSDB_DEFAULT_WAL_FLUSH_SIZE; // MB
// balance
int8_t tsEnableBalance = 1;
int8_t tsAlternativeRole = 0;
-int32_t tsBalanceInterval = 300; // seconds
+int32_t tsBalanceInterval = 300; // seconds
int32_t tsOfflineThreshold = 86400 * 10; // seconds of 10 days
int32_t tsMnodeEqualVnodeNum = 4;
int8_t tsEnableFlowCtrl = 1;
@@ -180,15 +180,16 @@ int8_t tsHttpEnableCompress = 1;
int8_t tsHttpEnableRecordSql = 0;
int8_t tsTelegrafUseFieldNum = 0;
int8_t tsHttpDbNameMandatory = 0;
+int32_t tsHttpKeepAlive = 30000;
// mqtt
int8_t tsEnableMqttModule = 0; // not finished yet, not started it by default
-char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
-char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
-char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
-char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
-char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
-char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
+char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
+char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
+char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
+char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
+char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
+char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
// monitor
int8_t tsEnableMonitorModule = 1;
@@ -197,7 +198,7 @@ char tsInternalPass[] = "secretkey";
int32_t tsMonitorInterval = 30; // seconds
// stream
-int8_t tsEnableStream = 1;
+int8_t tsEnableStream = 1;
// internal
int8_t tsCompactMnodeWal = 0;
@@ -213,7 +214,7 @@ char tsDataDir[PATH_MAX] = {0};
char tsScriptDir[PATH_MAX] = {0};
char tsTempDir[PATH_MAX] = "/tmp/";
-int32_t tsDiskCfgNum = 0;
+int32_t tsDiskCfgNum = 0;
int32_t tsTopicBianryLen = 16000;
#ifndef _STORAGE
@@ -231,54 +232,57 @@ SDiskCfg tsDiskCfg[TSDB_MAX_DISKS];
int64_t tsTickPerDay[] = {86400000L, 86400000000L, 86400000000000L};
// system info
-char tsOsName[10] = "Linux";
-int64_t tsPageSize;
-int64_t tsOpenMax;
-int64_t tsStreamMax;
-int32_t tsNumOfCores = 1;
-float tsTotalTmpDirGB = 0;
-float tsTotalDataDirGB = 0;
-float tsAvailTmpDirectorySpace = 0;
-float tsAvailDataDirGB = 0;
-float tsUsedDataDirGB = 0;
-float tsReservedTmpDirectorySpace = 1.0f;
-float tsMinimalDataDirGB = 2.0f;
-int32_t tsTotalMemoryMB = 0;
+char tsOsName[10] = "Linux";
+int64_t tsPageSize;
+int64_t tsOpenMax;
+int64_t tsStreamMax;
+int32_t tsNumOfCores = 1;
+float tsTotalTmpDirGB = 0;
+float tsTotalDataDirGB = 0;
+float tsAvailTmpDirectorySpace = 0;
+float tsAvailDataDirGB = 0;
+float tsUsedDataDirGB = 0;
+float tsReservedTmpDirectorySpace = 1.0f;
+float tsMinimalDataDirGB = 2.0f;
+int32_t tsTotalMemoryMB = 0;
uint32_t tsVersion = 0;
// log
-int32_t tsNumOfLogLines = 10000000;
-int32_t mDebugFlag = 131;
-int32_t sdbDebugFlag = 131;
-int32_t dDebugFlag = 135;
-int32_t vDebugFlag = 135;
+int32_t tsNumOfLogLines = 10000000;
+int32_t mDebugFlag = 131;
+int32_t sdbDebugFlag = 131;
+int32_t dDebugFlag = 135;
+int32_t vDebugFlag = 135;
uint32_t cDebugFlag = 131;
-int32_t jniDebugFlag = 131;
-int32_t odbcDebugFlag = 131;
-int32_t httpDebugFlag = 131;
-int32_t mqttDebugFlag = 131;
-int32_t monDebugFlag = 131;
+int32_t jniDebugFlag = 131;
+int32_t odbcDebugFlag = 131;
+int32_t httpDebugFlag = 131;
+int32_t mqttDebugFlag = 131;
+int32_t monDebugFlag = 131;
uint32_t qDebugFlag = 131;
-int32_t rpcDebugFlag = 131;
-int32_t uDebugFlag = 131;
-int32_t debugFlag = 0;
-int32_t sDebugFlag = 135;
-int32_t wDebugFlag = 135;
-int32_t tsdbDebugFlag = 131;
-int32_t cqDebugFlag = 131;
-int32_t fsDebugFlag = 135;
+int32_t rpcDebugFlag = 131;
+int32_t uDebugFlag = 131;
+int32_t debugFlag = 0;
+int32_t sDebugFlag = 135;
+int32_t wDebugFlag = 135;
+int32_t tsdbDebugFlag = 131;
+int32_t cqDebugFlag = 131;
+int32_t fsDebugFlag = 135;
+
+int8_t tsClientMerge = 0;
#ifdef TD_TSZ
//
// lossy compress 6
//
-char lossyColumns[32] = ""; // "float|double" means all float and double columns can be lossy compressed. set empty can close lossy compress.
-// below option can take effect when tsLossyColumns not empty
-double fPrecision = 1E-8; // float column precision
-double dPrecision = 1E-16; // double column precision
-uint32_t maxRange = 500; // max range
-uint32_t curRange = 100; // range
-char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
+char     lossyColumns[32] = "";  // "float|double" means all float and double columns can be lossy compressed. Leave it
+                                 // empty to disable lossy compression.
+// below option can take effect when tsLossyColumns not empty
+double fPrecision = 1E-8; // float column precision
+double dPrecision = 1E-16; // double column precision
+uint32_t maxRange = 500; // max range
+uint32_t curRange = 100; // range
+char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR
#endif
// long query death-lock
@@ -286,6 +290,7 @@ int8_t tsDeadLockKillQuery = 0;
// default JSON string type
char tsDefaultJSONStrType[7] = "binary";
+char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // a user-defined child table name can be specified in the tag value; if left empty, the system generates the table name using an MD5 hash.
int32_t (*monStartSystemFp)() = NULL;
void (*monStopSystemFp)() = NULL;
@@ -296,7 +301,7 @@ char *qtypeStr[] = {"rpc", "fwd", "wal", "cq", "query"};
static pthread_once_t tsInitGlobalCfgOnce = PTHREAD_ONCE_INIT;
void taosSetAllDebugFlag() {
- if (debugFlag != 0) {
+ if (debugFlag != 0) {
mDebugFlag = debugFlag;
sdbDebugFlag = debugFlag;
dDebugFlag = debugFlag;
@@ -307,7 +312,7 @@ void taosSetAllDebugFlag() {
httpDebugFlag = debugFlag;
mqttDebugFlag = debugFlag;
monDebugFlag = debugFlag;
- qDebugFlag = debugFlag;
+ qDebugFlag = debugFlag;
rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag;
sDebugFlag = debugFlag;
@@ -319,12 +324,13 @@ void taosSetAllDebugFlag() {
}
bool taosCfgDynamicOptions(char *msg) {
- char *option, *value;
- int32_t olen, vlen;
- int32_t vint = 0;
+ char *option, *value;
+ int32_t olen, vlen;
+ int32_t vint = 0;
paGetToken(msg, &option, &olen);
- if (olen == 0) return false;;
+ if (olen == 0) return false;
+ ;
paGetToken(option + olen + 1, &value, &vlen);
if (vlen == 0)
@@ -337,9 +343,9 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i;
- //if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
+ // if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32 && cfg->valType != TAOS_CFG_VTYPE_INT8) continue;
-
+
int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue;
@@ -368,7 +374,7 @@ bool taosCfgDynamicOptions(char *msg) {
return true;
}
if (strncasecmp(cfg->option, "debugFlag", olen) == 0) {
- taosSetAllDebugFlag();
+ taosSetAllDebugFlag();
}
return true;
}
@@ -425,7 +431,7 @@ static void taosCheckDataDirCfg() {
}
static int32_t taosCheckTmpDir(void) {
- if (strlen(tsTempDir) <= 0){
+ if (strlen(tsTempDir) <= 0) {
uError("tempDir is not set");
return -1;
}
@@ -446,7 +452,7 @@ static void doInitGlobalConfig(void) {
srand(taosSafeRand());
SGlobalCfg cfg = {0};
-
+
// ip address
cfg.option = "firstEp";
cfg.ptr = tsFirst;
@@ -575,12 +581,12 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsMaxNumOfDistinctResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
- cfg.minValue = 10*10000;
- cfg.maxValue = 10000*10000;
+ cfg.minValue = 10 * 10000;
+ cfg.maxValue = 10000 * 10000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
-
+
cfg.option = "numOfMnodes";
cfg.ptr = &tsNumOfMnodes;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -1187,7 +1193,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- // module configs
+ // module configs
cfg.option = "flowctrl";
cfg.ptr = &tsEnableFlowCtrl;
cfg.valType = TAOS_CFG_VTYPE_INT8;
@@ -1318,6 +1324,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+  // keep-alive time (ms) for HTTP pContext objects held in cache
+ cfg.option = "httpKeepAlive";
+ cfg.ptr = &tsHttpKeepAlive;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
+ cfg.minValue = 3000;
+ cfg.maxValue = 3600000;
+ cfg.ptrLength = 0;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// debug flag
cfg.option = "numOfLogLines";
cfg.ptr = &tsNumOfLogLines;
@@ -1399,7 +1416,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
-
cfg.option = "sdbDebugFlag";
cfg.ptr = &sdbDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@@ -1631,7 +1647,7 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- // enable kill long query
+ // enable kill long query
cfg.option = "deadLockKillQuery";
cfg.ptr = &tsDeadLockKillQuery;
cfg.valType = TAOS_CFG_VTYPE_INT8;
@@ -1642,6 +1658,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "clientMerge";
+ cfg.ptr = &tsClientMerge;
+ cfg.valType = TAOS_CFG_VTYPE_INT8;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 1;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// default JSON string type option "binary"/"nchar"
cfg.option = "defaultJSONStrType";
cfg.ptr = tsDefaultJSONStrType;
@@ -1653,6 +1679,17 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+  // child table name specified in schemaless tag value
+ cfg.option = "smlChildTableName";
+ cfg.ptr = tsSmlChildTableName;
+ cfg.valType = TAOS_CFG_VTYPE_STRING;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT;
+ cfg.minValue = 0;
+ cfg.maxValue = 0;
+ cfg.ptrLength = tListLen(tsSmlChildTableName);
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
// flush vnode wal file if walSize > walFlushSize and walSize > cache*0.5*blocks
cfg.option = "walFlushSize";
cfg.ptr = &tsdbWalFlushSize;
@@ -1715,25 +1752,22 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
- assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM);
+ assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#else
- assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5);
+ assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#endif
-
}
-void taosInitGlobalCfg() {
- pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig);
-}
+void taosInitGlobalCfg() { pthread_once(&tsInitGlobalCfgOnce, doInitGlobalConfig); }
int32_t taosCheckGlobalCfg() {
- char fqdn[TSDB_FQDN_LEN];
+ char fqdn[TSDB_FQDN_LEN];
uint16_t port;
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag();
}
-
+
if (tsLocalFqdn[0] == 0) {
taosGetFqdn(tsLocalFqdn);
}
@@ -1760,7 +1794,7 @@ int32_t taosCheckGlobalCfg() {
if (taosCheckTmpDir()) {
return -1;
}
-
+
taosGetSystemInfo();
tsSetLocale();
@@ -1782,8 +1816,8 @@ int32_t taosCheckGlobalCfg() {
}
if (tsMaxTablePerVnode < tsMinTablePerVnode) {
- uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)",
- tsMaxTablePerVnode, tsMinTablePerVnode, tsMinTablePerVnode);
+ uError("maxTablesPerVnode(%d) < minTablesPerVnode(%d), reset to minTablesPerVnode(%d)", tsMaxTablePerVnode,
+ tsMinTablePerVnode, tsMinTablePerVnode);
tsMaxTablePerVnode = tsMinTablePerVnode;
}
@@ -1805,7 +1839,7 @@ int32_t taosCheckGlobalCfg() {
}
tsDnodeShellPort = tsServerPort + TSDB_PORT_DNODESHELL; // udp[6035-6039] tcp[6035]
- tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp
+ tsDnodeDnodePort = tsServerPort + TSDB_PORT_DNODEDNODE; // udp/tcp
tsSyncPort = tsServerPort + TSDB_PORT_SYNC;
tsHttpPort = tsServerPort + TSDB_PORT_HTTP;
@@ -1825,17 +1859,17 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
strcpy(fqdn, ep);
char *temp = strchr(fqdn, ':');
- if (temp) {
+ if (temp) {
*temp = 0;
- *port = atoi(temp+1);
- }
-
+ *port = atoi(temp + 1);
+ }
+
if (*port == 0) {
*port = tsServerPort;
return -1;
}
- return 0;
+ return 0;
}
/*
diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin
index edad746514b2a53a8cf6061c93b98b52a5388692..792ef7c3036f15068796e09883d3f4d47a038fe2 160000
--- a/src/connector/grafanaplugin
+++ b/src/connector/grafanaplugin
@@ -1 +1 @@
-Subproject commit edad746514b2a53a8cf6061c93b98b52a5388692
+Subproject commit 792ef7c3036f15068796e09883d3f4d47a038fe2
diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt
index 065dedac63372f5c71146ee9937a6e136d71ce81..c5b59baefedc38fa4bf558526a8c4a1777bfb7bb 100644
--- a/src/connector/jdbc/CMakeLists.txt
+++ b/src/connector/jdbc/CMakeLists.txt
@@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED)
ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME}
POST_BUILD
COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
- COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.35-dist.jar ${LIBRARY_OUTPUT_PATH}
+ COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.36-dist.jar ${LIBRARY_OUTPUT_PATH}
COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml
COMMENT "build jdbc driver")
ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME})
diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml
index 7caf46848d18c4491cdea1ab50df31d8d2d26daf..926a5ef483d9f1da07dbfdeb796567d3ea077c87 100755
--- a/src/connector/jdbc/deploy-pom.xml
+++ b/src/connector/jdbc/deploy-pom.xml
@@ -5,7 +5,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.35
+ 2.0.36
jar
JDBCDriver
diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml
index 810a85f8a33b3f244dab81e349b9df786ec50c21..04115e2a0ebc5924a51862cd9a49a5352cf6a5b6 100644
--- a/src/connector/jdbc/pom.xml
+++ b/src/connector/jdbc/pom.xml
@@ -3,7 +3,7 @@
4.0.0
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.35
+ 2.0.36
jar
JDBCDriver
https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc
@@ -58,6 +58,13 @@
4.13.1
test
+
+
+ commons-logging
+ commons-logging
+ 1.2
+ test
+
@@ -70,6 +77,18 @@
+
+ org.apache.maven.plugins
+ maven-source-plugin
+
+
+ attach-sources
+
+ jar
+
+
+
+
org.apache.maven.plugins
maven-assembly-plugin
@@ -112,6 +131,7 @@
**/*Test.java
+ **/HttpClientPoolUtilTest.java
**/AppMemoryLeakTest.java
**/JDBCTypeAndTypeCompareTest.java
**/ConnectMultiTaosdByRestfulWithDifferentTokenTest.java
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatementWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatementWrapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..0b46226d1113b82d9333204427eaad074d3572cb
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatementWrapper.java
@@ -0,0 +1,51 @@
+package com.taosdata.jdbc;
+
+import java.sql.*;
+
+public class AbstractStatementWrapper extends AbstractStatement{
+ protected Statement statement;
+
+ public AbstractStatementWrapper(Statement statement) {
+ this.statement = statement;
+ }
+
+ @Override
+ public ResultSet executeQuery(String sql) throws SQLException {
+ return statement.executeQuery(sql);
+ }
+
+ @Override
+ public int executeUpdate(String sql) throws SQLException {
+ return statement.executeUpdate(sql);
+ }
+
+ @Override
+ public void close() throws SQLException {
+ statement.close();
+ }
+
+ @Override
+ public boolean execute(String sql) throws SQLException {
+ return statement.execute(sql);
+ }
+
+ @Override
+ public ResultSet getResultSet() throws SQLException {
+ return statement.getResultSet();
+ }
+
+ @Override
+ public int getUpdateCount() throws SQLException {
+ return statement.getUpdateCount();
+ }
+
+ @Override
+ public Connection getConnection() throws SQLException {
+ return statement.getConnection();
+ }
+
+ @Override
+ public boolean isClosed() throws SQLException {
+ return statement.isClosed();
+ }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java
new file mode 100644
index 0000000000000000000000000000000000000000..748891d943536b3cb6ebd6adffd295573adee4d1
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SchemalessStatement.java
@@ -0,0 +1,58 @@
+package com.taosdata.jdbc;
+
+import com.taosdata.jdbc.enums.SchemalessProtocolType;
+import com.taosdata.jdbc.enums.SchemalessTimestampType;
+import com.taosdata.jdbc.rs.RestfulConnection;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+/**
+ * @author huolibo@qq.com
+ * @version v1.0.0
+ * @JDK: 1.8
+ * @description: this class is an extension of {@link Statement}. use like:
+ * Statement statement = conn.createStatement();
+ * SchemalessStatement schemalessStatement = new SchemalessStatement(statement);
+ * schemalessStatement.execute(sql);
+ * schemalessStatement.executeSchemaless(lines, SchemalessProtocolType, SchemalessTimestampType);
+ * @since 2021-11-03 17:10
+ */
+public class SchemalessStatement extends AbstractStatementWrapper {
+ public SchemalessStatement(Statement statement) {
+ super(statement);
+ }
+
+ /**
+ * batch insert schemaless lines
+ *
+ * @param lines schemaless data
+ * @param protocolType schemaless type {@link SchemalessProtocolType}
+ * @param timestampType Time precision {@link SchemalessTimestampType}
+ * @throws SQLException execute insert exception
+ */
+ public void executeSchemaless(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
+ Connection connection = this.getConnection();
+ if (connection instanceof TSDBConnection) {
+ TSDBConnection tsdbConnection = (TSDBConnection) connection;
+ tsdbConnection.getConnector().insertLines(lines, protocolType, timestampType);
+ } else if (connection instanceof RestfulConnection) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD, "restful connection is not supported currently");
+ } else {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "unknown connection:" + connection.getMetaData().getURL());
+ }
+ }
+
+ /**
+ * only one insert
+ *
+ * @param line schemaless line
+ * @param protocolType schemaless type {@link SchemalessProtocolType}
+ * @param timestampType Time precision {@link SchemalessTimestampType}
+ * @throws SQLException execute insert exception
+ */
+ public void executeSchemaless(String line, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
+ executeSchemaless(new String[]{line}, protocolType, timestampType);
+ }
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
index 307451e014c59c1c3419f1a9daff4f89e8b90d46..0fef64a6f82706e30677ad4e74604924c5cc2e60 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBDriver.java
@@ -135,7 +135,6 @@ public class TSDBDriver extends AbstractDriver {
TSDBJNIConnector.init(props);
return new TSDBConnection(props, this.dbMetaData);
} catch (SQLWarning sqlWarning) {
- sqlWarning.printStackTrace();
return new TSDBConnection(props, this.dbMetaData);
} catch (SQLException sqlEx) {
throw sqlEx;
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
index 0f4427fa20e272917df0327552efd1a80cd56b4d..1c380fed7dac2c54655830eef6f575e9c07e22af 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java
@@ -32,6 +32,7 @@ public class TSDBErrorNumbers {
public static final int ERROR_USER_IS_REQUIRED = 0x2319; // user is required
public static final int ERROR_PASSWORD_IS_REQUIRED = 0x231a; // password is required
public static final int ERROR_INVALID_JSON_FORMAT = 0x231b;
+ public static final int ERROR_HTTP_ENTITY_IS_NULL = 0x231c; //http entity is null
public static final int ERROR_UNKNOWN = 0x2350; //unknown error
@@ -74,6 +75,7 @@ public class TSDBErrorNumbers {
errorNumbers.add(ERROR_USER_IS_REQUIRED);
errorNumbers.add(ERROR_PASSWORD_IS_REQUIRED);
errorNumbers.add(ERROR_INVALID_JSON_FORMAT);
+ errorNumbers.add(ERROR_HTTP_ENTITY_IS_NULL);
errorNumbers.add(ERROR_RESTFul_Client_Protocol_Exception);
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
index aaada2e78ec284f4019b29465a38db109cf9d80a..a5c7f26a266f81e3a7915503d2983efe077765c2 100755
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java
@@ -17,6 +17,8 @@
package com.taosdata.jdbc;
import com.alibaba.fastjson.JSONObject;
+import com.taosdata.jdbc.enums.SchemalessProtocolType;
+import com.taosdata.jdbc.enums.SchemalessTimestampType;
import com.taosdata.jdbc.utils.TaosInfo;
import java.nio.ByteBuffer;
@@ -359,14 +361,14 @@ public class TSDBJNIConnector {
private native int closeStmt(long stmt, long con);
- public void insertLines(String[] lines) throws SQLException {
- int code = insertLinesImp(lines, this.taos);
+ public void insertLines(String[] lines, SchemalessProtocolType protocolType, SchemalessTimestampType timestampType) throws SQLException {
+ int code = insertLinesImp(lines, this.taos, protocolType.ordinal(), timestampType.ordinal());
if (code != TSDBConstants.JNI_SUCCESS) {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNKNOWN, "failed to insertLines");
}
}
- private native int insertLinesImp(String[] lines, long conn);
+ private native int insertLinesImp(String[] lines, long conn, int type, int precision);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
index 22fb0c4ae4987ade0a406fe5628bf80d975f3ae5..42ebedf4027b0e333b9e79b8045f1bae0d338ac7 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java
@@ -36,15 +36,15 @@ import java.util.regex.Pattern;
* compatibility needs.
*/
public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement {
+ // for jdbc preparedStatement interface
private String rawSql;
private Object[] parameters;
-
- private ArrayList colData;
+ // for parameter binding
+ private long nativeStmtHandle = 0;
+ private String tableName;
private ArrayList tableTags;
private int tagValueLength;
-
- private String tableName;
- private long nativeStmtHandle = 0;
+ private ArrayList colData;
TSDBPreparedStatement(TSDBConnection connection, String sql) {
super(connection);
@@ -72,10 +72,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
preprocessSql();
}
- /*
- *
- */
-
/**
* Some of the SQLs sent by other popular frameworks or tools like Spark, contains syntax that cannot be parsed by
* the TDengine client. Thus, some simple parsers/filters are intentionally added in this JDBC implementation in
@@ -250,13 +246,10 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
- if (isClosed()) {
+ if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- }
-
- if (parameterIndex < 1 && parameterIndex >= parameters.length) {
+ if (parameterIndex < 1 && parameterIndex >= parameters.length)
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE);
- }
parameters[parameterIndex - 1] = x;
}
@@ -335,7 +328,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- // TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -419,7 +411,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
- //TODO:
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
}
@@ -477,7 +468,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
if (isClosed())
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED);
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD);
-
}
@Override
@@ -496,7 +486,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
///////////////////////////////////////////////////////////////////////
// NOTE: the following APIs are not JDBC compatible
- // set the bind table name
+ // parameter binding
private static class ColumnInfo {
@SuppressWarnings("rawtypes")
private ArrayList data;
@@ -539,7 +529,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
}
}
- public void setTableName(String name) {
+ public void setTableName(String name) throws SQLException {
+ if (this.tableName != null) {
+ this.columnDataExecuteBatch();
+ this.columnDataClearBatchInternal();
+ }
this.tableName = name;
}
@@ -960,17 +954,22 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
public void columnDataExecuteBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.executeBatch(this.nativeStmtHandle);
- this.columnDataClearBatch();
+ this.columnDataClearBatchInternal();
}
+ @Deprecated
public void columnDataClearBatch() {
+ columnDataClearBatchInternal();
+ }
+
+ private void columnDataClearBatchInternal() {
int size = this.colData.size();
this.colData.clear();
-
this.colData.addAll(Collections.nCopies(size, null));
this.tableName = null; // clear the table name
}
+
public void columnDataCloseBatch() throws SQLException {
TSDBJNIConnector connector = ((TSDBConnection) this.getConnection()).getConnector();
connector.closeBatch(this.nativeStmtHandle);
@@ -978,4 +977,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
this.nativeStmtHandle = 0L;
this.tableName = null;
}
+
+ @Override
+ public void close() throws SQLException {
+ this.columnDataClearBatchInternal();
+ this.columnDataCloseBatch();
+ super.close();
+ }
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
index 00a62206fc7861a87177d14cc4b274c464dc4184..3814186f779203741001943efe47b85c0be83acb 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java
@@ -19,6 +19,7 @@ import com.google.common.primitives.Longs;
import com.google.common.primitives.Shorts;
import java.math.BigDecimal;
+import java.nio.charset.StandardCharsets;
import java.sql.*;
import java.util.ArrayList;
import java.util.Calendar;
@@ -256,7 +257,11 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public byte[] getBytes(int columnIndex) throws SQLException {
checkAvailability(columnIndex, this.columnMetaDataList.size());
+ if (this.getBatchFetch())
+ return this.blockData.getString(columnIndex).getBytes();
+
Object value = this.rowData.getObject(columnIndex);
+ this.lastWasNull = value == null;
if (value == null)
return null;
@@ -331,25 +336,26 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
return new BigDecimal(this.blockData.getLong(columnIndex - 1));
this.lastWasNull = this.rowData.wasNull(columnIndex);
- BigDecimal res = null;
- if (!lastWasNull) {
- int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
- switch (nativeType) {
- case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
- case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
- case TSDBConstants.TSDB_DATA_TYPE_INT:
- case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
- res = new BigDecimal(Long.parseLong(this.rowData.getObject(columnIndex).toString()));
- break;
- case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
- case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
- res = BigDecimal.valueOf(Double.parseDouble(this.rowData.getObject(columnIndex).toString()));
- break;
- case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
- return new BigDecimal(((Timestamp) this.rowData.getObject(columnIndex)).getTime());
- default:
- res = new BigDecimal(this.rowData.getObject(columnIndex).toString());
- }
+ if (lastWasNull)
+ return null;
+
+ BigDecimal res;
+ int nativeType = this.columnMetaDataList.get(columnIndex - 1).getColType();
+ switch (nativeType) {
+ case TSDBConstants.TSDB_DATA_TYPE_TINYINT:
+ case TSDBConstants.TSDB_DATA_TYPE_SMALLINT:
+ case TSDBConstants.TSDB_DATA_TYPE_INT:
+ case TSDBConstants.TSDB_DATA_TYPE_BIGINT:
+ res = new BigDecimal(Long.parseLong(this.rowData.getObject(columnIndex).toString()));
+ break;
+ case TSDBConstants.TSDB_DATA_TYPE_FLOAT:
+ case TSDBConstants.TSDB_DATA_TYPE_DOUBLE:
+ res = BigDecimal.valueOf(Double.parseDouble(this.rowData.getObject(columnIndex).toString()));
+ break;
+ case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP:
+ return new BigDecimal(((Timestamp) this.rowData.getObject(columnIndex)).getTime());
+ default:
+ res = new BigDecimal(this.rowData.getObject(columnIndex).toString());
}
return res;
}
@@ -465,12 +471,6 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet {
public boolean isClosed() throws SQLException {
return isClosed;
-// if (isClosed)
-// return true;
-// if (jniConnector != null) {
-// isClosed = jniConnector.isResultsetClosed();
-// }
-// return isClosed;
}
public String getNString(int columnIndex) throws SQLException {
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java
index 48854e773f89a45784de3cd709ec5bbe6185e09b..0a9f017cbbd775cf710f3bac4440ee8a43403870 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetWrapper.java
@@ -23,7 +23,7 @@ import java.util.Calendar;
import java.util.Map;
/*
- * TDengine only supports a subset of the standard SQL, thus this implemetation of the
+ * TDengine only supports a subset of the standard SQL, thus this implementation of the
* standard JDBC API contains more or less some adjustments customized for certain
* compatibility needs.
*/
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
index 436bdcf582b821292c5f4e69f51688f9bf84b870..2c77df2981e18931d6cb56cca84bb2115716b349 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java
@@ -84,8 +84,7 @@ public class TSDBStatement extends AbstractStatement {
long pSql = this.connection.getConnector().executeQuery(sql);
// if pSql is create/insert/update/delete/alter SQL
if (this.connection.getConnector().isUpdateQuery(pSql)) {
- int rows = this.connection.getConnector().getAffectedRows(pSql);
- this.affectedRows = rows == 0 ? -1 : this.connection.getConnector().getAffectedRows(pSql);
+ this.affectedRows = this.connection.getConnector().getAffectedRows(pSql);
this.connection.getConnector().freeResultSet(pSql);
return false;
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java
new file mode 100644
index 0000000000000000000000000000000000000000..2a0bea15702a79b3440f95771cf56b879a814626
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessProtocolType.java
@@ -0,0 +1,18 @@
+package com.taosdata.jdbc.enums;
+
+import java.util.Arrays;
+
+public enum SchemalessProtocolType {
+ UNKNOWN,
+ LINE,
+ TELNET,
+ JSON,
+ ;
+
+ public static SchemalessProtocolType parse(String type) {
+ return Arrays.stream(SchemalessProtocolType.values())
+ .filter(protocol -> type.equalsIgnoreCase(protocol.name()))
+ .findFirst().orElse(UNKNOWN);
+ }
+
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java
new file mode 100644
index 0000000000000000000000000000000000000000..fa10a23634ec75182365d42ebfb79aff7b14b08f
--- /dev/null
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/enums/SchemalessTimestampType.java
@@ -0,0 +1,13 @@
+package com.taosdata.jdbc.enums;
+
+public enum SchemalessTimestampType {
+ // Let the database decide
+ NOT_CONFIGURED,
+ HOURS,
+ MINUTES,
+ SECONDS,
+ MILLI_SECONDS,
+ MICRO_SECONDS,
+ NANO_SECONDS,
+ ;
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
index d5985756ee1851407bf19a568657fa2127d0be43..36714893e3ca519dea07910a95d5ee1c1b6fb731 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java
@@ -50,9 +50,13 @@ public class RestfulDriver extends AbstractDriver {
String password = URLEncoder.encode(props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD), StandardCharsets.UTF_8.displayName());
loginUrl = "http://" + props.getProperty(TSDBDriver.PROPERTY_KEY_HOST) + ":" + props.getProperty(TSDBDriver.PROPERTY_KEY_PORT) + "/rest/login/" + user + "/" + password + "";
} catch (UnsupportedEncodingException e) {
- e.printStackTrace();
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE, "unsupported UTF-8 encoding, user: " + props.getProperty(TSDBDriver.PROPERTY_KEY_USER) + ", password: " + props.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
}
+ int poolSize = Integer.valueOf(props.getProperty("httpPoolSize", HttpClientPoolUtil.DEFAULT_MAX_PER_ROUTE));
+ boolean keepAlive = Boolean.valueOf(props.getProperty("httpKeepAlive", HttpClientPoolUtil.DEFAULT_HTTP_KEEP_ALIVE));
+
+ HttpClientPoolUtil.init(poolSize, keepAlive);
String result = HttpClientPoolUtil.execute(loginUrl);
JSONObject jsonResult = JSON.parseObject(result);
String status = jsonResult.getString("status");
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
index 1ea39236b666fda106c3ee3534560b6380d7bec9..f3e3f138df8fc854817c0adf57c5f5453f52bf05 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java
@@ -255,6 +255,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null)
return null;
if (value instanceof byte[])
@@ -267,11 +268,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
- if (value == null) {
- wasNull = true;
+ wasNull = value == null;
+ if (value == null)
return false;
- }
- wasNull = false;
if (value instanceof Boolean)
return (boolean) value;
return Boolean.parseBoolean(value.toString());
@@ -282,11 +281,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
- if (value == null) {
- wasNull = true;
+ wasNull = value == null;
+ if (value == null)
return 0;
- }
- wasNull = false;
long valueAsLong = Long.parseLong(value.toString());
if (valueAsLong == Byte.MIN_VALUE)
return 0;
@@ -306,11 +303,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
- if (value == null) {
- wasNull = true;
+ wasNull = value == null;
+ if (value == null)
return 0;
- }
- wasNull = false;
long valueAsLong = Long.parseLong(value.toString());
if (valueAsLong == Short.MIN_VALUE)
return 0;
@@ -324,11 +319,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
- if (value == null) {
- wasNull = true;
+ wasNull = value == null;
+ if (value == null)
return 0;
- }
- wasNull = false;
long valueAsLong = Long.parseLong(value.toString());
if (valueAsLong == Integer.MIN_VALUE)
return 0;
@@ -342,15 +335,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
- if (value == null) {
- wasNull = true;
+ wasNull = value == null;
+ if (value == null)
return 0;
- }
-
- wasNull = false;
- if (value instanceof Timestamp) {
+ if (value instanceof Timestamp)
return ((Timestamp) value).getTime();
- }
long valueAsLong = 0;
try {
valueAsLong = Long.parseLong(value.toString());
@@ -367,11 +356,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
- if (value == null) {
- wasNull = true;
+ wasNull = value == null;
+ if (value == null)
return 0;
- }
- wasNull = false;
if (value instanceof Float)
return (float) value;
if (value instanceof Double)
@@ -384,11 +371,10 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null) {
- wasNull = true;
return 0;
}
- wasNull = false;
if (value instanceof Double || value instanceof Float)
return (double) value;
return Double.parseDouble(value.toString());
@@ -399,6 +385,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null)
return null;
if (value instanceof byte[])
@@ -425,6 +412,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null)
return null;
if (value instanceof Timestamp)
@@ -437,6 +425,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null)
return null;
if (value instanceof Timestamp)
@@ -454,6 +443,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null)
return null;
if (value instanceof Timestamp)
@@ -470,6 +460,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
ret = Utils.parseTimestamp(value.toString());
} catch (Exception e) {
ret = null;
+ wasNull = true;
}
return ret;
}
@@ -485,7 +476,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
public Object getObject(int columnIndex) throws SQLException {
checkAvailability(columnIndex, resultSet.get(pos).size());
- return resultSet.get(pos).get(columnIndex - 1);
+ Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
+ return value;
}
@Override
@@ -504,9 +497,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
checkAvailability(columnIndex, resultSet.get(pos).size());
Object value = resultSet.get(pos).get(columnIndex - 1);
+ wasNull = value == null;
if (value == null)
return null;
-
if (value instanceof Long || value instanceof Integer || value instanceof Short || value instanceof Byte)
return new BigDecimal(Long.parseLong(value.toString()));
if (value instanceof Double || value instanceof Float)
@@ -663,4 +656,4 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet {
return isClosed;
}
-}
\ No newline at end of file
+}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
index b7f5fe8006368295753a366aa218a6cc17aa0588..fb8b82271b02b70b348b43a7c88a0084adaa5ab5 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulStatement.java
@@ -140,8 +140,7 @@ public class RestfulStatement extends AbstractStatement {
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
JSONArray data = jsonObject.getJSONArray("data");
if (data != null) {
- int rows = data.getJSONArray(0).getInteger(0);
- return rows == 0 ? -1 : data.getJSONArray(0).getInteger(0);
+ return data.getJSONArray(0).getInteger(0);
}
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_VARIABLE);
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
index de26ab7f1f458a4587ce15bebab3c2c1b0dbc070..fc116b32c2a154c9479e4933d887ac7ddcedbe9f 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/HttpClientPoolUtil.java
@@ -6,10 +6,10 @@ import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.HttpRequestRetryHandler;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
import org.apache.http.client.protocol.HttpClientContext;
+import org.apache.http.conn.ClientConnectionManager;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
@@ -20,22 +20,21 @@ import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.util.EntityUtils;
-import javax.net.ssl.SSLException;
import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
+import java.util.concurrent.TimeUnit;
public class HttpClientPoolUtil {
private static final String DEFAULT_CONTENT_TYPE = "application/json";
- private static final int DEFAULT_MAX_TOTAL = 200;
- private static final int DEFAULT_MAX_PER_ROUTE = 20;
- private static final int DEFAULT_TIME_OUT = 15000;
- private static final int DEFAULT_HTTP_KEEP_TIME = 15000;
private static final int DEFAULT_MAX_RETRY_COUNT = 5;
+ public static final String DEFAULT_HTTP_KEEP_ALIVE = "true";
+ public static final String DEFAULT_MAX_PER_ROUTE = "20";
+ private static final int DEFAULT_HTTP_KEEP_TIME = -1;
+ private static String isKeepAlive;
+
private static final ConnectionKeepAliveStrategy DEFAULT_KEEP_ALIVE_STRATEGY = (response, context) -> {
HeaderElementIterator it = new BasicHeaderElementIterator(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
@@ -52,48 +51,41 @@ public class HttpClientPoolUtil {
return DEFAULT_HTTP_KEEP_TIME * 1000;
};
- private static final HttpRequestRetryHandler retryHandler = (exception, executionCount, httpContext) -> {
- if (executionCount >= DEFAULT_MAX_RETRY_COUNT)
- // do not retry if over max retry count
- return false;
- if (exception instanceof InterruptedIOException)
- // timeout
- return false;
- if (exception instanceof UnknownHostException)
- // unknown host
- return false;
- if (exception instanceof SSLException)
- // SSL handshake exception
- return false;
- return true;
- };
-
private static CloseableHttpClient httpClient;
- static {
- PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
- connectionManager.setMaxTotal(DEFAULT_MAX_TOTAL);
- connectionManager.setDefaultMaxPerRoute(DEFAULT_MAX_PER_ROUTE);
- httpClient = HttpClients.custom().setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY).setConnectionManager(connectionManager).setRetryHandler(retryHandler).build();
+ public static void init(Integer connPoolSize, boolean keepAlive) {
+ if (httpClient == null) {
+ synchronized (HttpClientPoolUtil.class) {
+ if (httpClient == null) {
+ isKeepAlive = keepAlive ? HTTP.CONN_KEEP_ALIVE : HTTP.CONN_CLOSE;
+ PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
+ connectionManager.setMaxTotal(connPoolSize * 10);
+ connectionManager.setDefaultMaxPerRoute(connPoolSize);
+ httpClient = HttpClients.custom()
+ .setKeepAliveStrategy(DEFAULT_KEEP_ALIVE_STRATEGY)
+ .setConnectionManager(connectionManager)
+ .setRetryHandler((exception, executionCount, httpContext) -> executionCount < DEFAULT_MAX_RETRY_COUNT)
+ .build();
+ }
+ }
+ }
}
/*** execute GET request ***/
public static String execute(String uri) throws SQLException {
HttpEntity httpEntity = null;
String responseBody = "";
- try {
- HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
- HttpContext context = HttpClientContext.create();
- CloseableHttpResponse httpResponse = httpClient.execute(method, context);
+ HttpRequestBase method = getRequest(uri, HttpGet.METHOD_NAME);
+ HttpContext context = HttpClientContext.create();
+
+ try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) {
httpEntity = httpResponse.getEntity();
if (httpEntity != null) {
responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
}
} catch (ClientProtocolException e) {
- e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
} catch (IOException exception) {
- exception.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally {
if (httpEntity != null) {
@@ -103,29 +95,27 @@ public class HttpClientPoolUtil {
return responseBody;
}
-
/*** execute POST request ***/
public static String execute(String uri, String data, String token) throws SQLException {
+
+ HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
+ method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
+ method.setHeader(HTTP.CONN_DIRECTIVE, isKeepAlive);
+ method.setHeader("Authorization", "Taosd " + token);
+ method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
+ HttpContext context = HttpClientContext.create();
+
HttpEntity httpEntity = null;
String responseBody = "";
- try {
- HttpEntityEnclosingRequestBase method = (HttpEntityEnclosingRequestBase) getRequest(uri, HttpPost.METHOD_NAME);
- method.setHeader(HTTP.CONTENT_TYPE, "text/plain");
- method.setHeader(HTTP.CONN_DIRECTIVE, HTTP.CONN_KEEP_ALIVE);
- method.setHeader("Authorization", "Taosd " + token);
-
- method.setEntity(new StringEntity(data, StandardCharsets.UTF_8));
- HttpContext context = HttpClientContext.create();
- CloseableHttpResponse httpResponse = httpClient.execute(method, context);
+ try (CloseableHttpResponse httpResponse = httpClient.execute(method, context)) {
httpEntity = httpResponse.getEntity();
- if (httpEntity != null) {
- responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
+ if (httpEntity == null) {
+ throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_HTTP_ENTITY_IS_NULL, "httpEntity is null, sql: " + data);
}
+ responseBody = EntityUtils.toString(httpEntity, StandardCharsets.UTF_8);
} catch (ClientProtocolException e) {
- e.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_Protocol_Exception, e.getMessage());
} catch (IOException exception) {
- exception.printStackTrace();
throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESTFul_Client_IOException, exception.getMessage());
} finally {
if (httpEntity != null) {
@@ -139,9 +129,6 @@ public class HttpClientPoolUtil {
private static HttpRequestBase getRequest(String uri, String methodName) {
HttpRequestBase method;
RequestConfig requestConfig = RequestConfig.custom()
- .setSocketTimeout(DEFAULT_TIME_OUT * 1000)
- .setConnectTimeout(DEFAULT_TIME_OUT * 1000)
- .setConnectionRequestTimeout(DEFAULT_TIME_OUT * 1000)
.setExpectContinueEnabled(false)
.build();
if (HttpPut.METHOD_NAME.equalsIgnoreCase(methodName)) {
@@ -159,4 +146,12 @@ public class HttpClientPoolUtil {
return method;
}
+
+ public static void reset() {
+ synchronized (HttpClientPoolUtil.class) {
+ ClientConnectionManager cm = httpClient.getConnectionManager();
+ cm.closeExpiredConnections();
+ cm.closeIdleConnections(100, TimeUnit.MILLISECONDS);
+ }
+ }
}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java
index a427103770cff7f51355024688454824d7263c77..d4664f2678013b3de87bcd3f0dc24631be511ede 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/TaosInfo.java
@@ -16,7 +16,6 @@ public class TaosInfo implements TaosInfoMBean {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
ObjectName name = new ObjectName("TaosInfoMBean:name=TaosInfo");
server.registerMBean(TaosInfo.getInstance(), name);
-
} catch (MalformedObjectNameException | InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) {
e.printStackTrace();
}
diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
index 9cf61903f001e84f237e25c3c10fdbb8aac28fd7..6cd1ff7200962b7347969e0b8b10443083505912 100644
--- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
+++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/Utils.java
@@ -49,14 +49,9 @@ public class Utils {
try {
return parseMicroSecTimestamp(timeStampStr);
} catch (DateTimeParseException ee) {
- try {
- return parseNanoSecTimestamp(timeStampStr);
- } catch (DateTimeParseException eee) {
- eee.printStackTrace();
- }
+ return parseNanoSecTimestamp(timeStampStr);
}
}
- return null;
}
private static LocalDateTime parseMilliSecTimestamp(String timeStampStr) throws DateTimeParseException {
@@ -121,7 +116,7 @@ public class Utils {
}
private static void findValuesClauseRangeSet(String preparedSql, RangeSet clauseRangeSet) {
- Matcher matcher = Pattern.compile("(values|,)\\s*(\\([^)]*\\))").matcher(preparedSql);
+ Matcher matcher = Pattern.compile("(values||,)\\s*(\\([^)]*\\))").matcher(preparedSql);
while (matcher.find()) {
int start = matcher.start(2);
int end = matcher.end(2);
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..46f201d1c0a525f52014d133e25fc0db4741050c
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ParameterBindTest.java
@@ -0,0 +1,139 @@
+package com.taosdata.jdbc;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.stream.Collectors;
+
+public class ParameterBindTest {
+
+ private static final String host = "127.0.0.1";
+ private static final String stable = "weather";
+
+ private Connection conn;
+ private final Random random = new Random(System.currentTimeMillis());
+
+ @Test
+ public void test() {
+ // given
+ String[] tbnames = {"t1", "t2", "t3"};
+ int rows = 10;
+
+ // when
+ insertIntoTables(tbnames, 10);
+
+ // then
+ assertRows(stable, tbnames.length * rows);
+ for (String t : tbnames) {
+ assertRows(t, rows);
+ }
+ }
+
+ @Test
+ public void testMultiThreads() {
+ // given
+ String[][] tables = {{"t1", "t2", "t3"}, {"t4", "t5", "t6"}, {"t7", "t8", "t9"}, {"t10"}};
+ int rows = 10;
+
+ // when
+ List threads = Arrays.stream(tables).map(tbnames -> new Thread(() -> insertIntoTables(tbnames, rows))).collect(Collectors.toList());
+ threads.forEach(Thread::start);
+ for (Thread thread : threads) {
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ // then
+ for (String[] table : tables) {
+ for (String t : table) {
+ assertRows(t, rows);
+ }
+ }
+
+ }
+
+ private void assertRows(String tbname, int rows) {
+ try (Statement stmt = conn.createStatement()) {
+ ResultSet rs = stmt.executeQuery("select count(*) from " + tbname);
+ while (rs.next()) {
+ int count = rs.getInt(1);
+ Assert.assertEquals(rows, count);
+ }
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void insertIntoTables(String[] tbnames, int rowsEachTable) {
+ long current = System.currentTimeMillis();
+ String sql = "insert into ? using " + stable + " tags(?, ?) values(?, ?, ?)";
+ try (TSDBPreparedStatement pstmt = conn.prepareStatement(sql).unwrap(TSDBPreparedStatement.class)) {
+ for (int i = 0; i < tbnames.length; i++) {
+ pstmt.setTableName(tbnames[i]);
+ pstmt.setTagInt(0, random.nextInt(100));
+ pstmt.setTagInt(1, random.nextInt(100));
+
+ ArrayList timestampList = new ArrayList<>();
+ for (int j = 0; j < rowsEachTable; j++) {
+ timestampList.add(current + i * 1000 + j);
+ }
+ pstmt.setTimestamp(0, timestampList);
+
+ ArrayList f1List = new ArrayList<>();
+ for (int j = 0; j < rowsEachTable; j++) {
+ f1List.add(random.nextInt(100));
+ }
+ pstmt.setInt(1, f1List);
+
+ ArrayList f2List = new ArrayList<>();
+ for (int j = 0; j < rowsEachTable; j++) {
+ f2List.add(random.nextInt(100));
+ }
+ pstmt.setInt(2, f2List);
+
+ pstmt.columnDataAddBatch();
+ }
+
+ pstmt.columnDataExecuteBatch();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Before
+ public void before() {
+ String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ try {
+ conn = DriverManager.getConnection(url);
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists test_pd");
+ stmt.execute("create database if not exists test_pd");
+ stmt.execute("use test_pd");
+ stmt.execute("create table " + stable + "(ts timestamp, f1 int, f2 int) tags(t1 int, t2 int)");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @After
+ public void after() {
+ try {
+// Statement stmt = conn.createStatement();
+// stmt.execute("drop database if exists test_pd");
+ if (conn != null)
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..fd4ac12ce40dc02f2b6ffbf91e33b0e0bd2398a9
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/SchemalessInsertTest.java
@@ -0,0 +1,163 @@
+package com.taosdata.jdbc;
+
+import com.alibaba.fastjson.JSONArray;
+import com.alibaba.fastjson.JSONObject;
+import com.taosdata.jdbc.enums.SchemalessProtocolType;
+import com.taosdata.jdbc.enums.SchemalessTimestampType;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.sql.*;
+
+public class SchemalessInsertTest {
+ private final String dbname = "test_schemaless_insert";
+ private Connection conn;
+
+ /**
+ * schemaless insert compatible with influxdb
+ *
+ * @throws SQLException execute error
+ */
+ @Test
+ public void schemalessInsert() throws SQLException {
+ // given
+ String[] lines = new String[]{
+ "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+ "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000"};
+ // when
+ try (Statement statement = conn.createStatement();
+ SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
+ schemalessStatement.executeSchemaless(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
+ }
+
+ // then
+ Statement statement = conn.createStatement();
+ ResultSet rs = statement.executeQuery("show tables");
+ Assert.assertNotNull(rs);
+ ResultSetMetaData metaData = rs.getMetaData();
+ Assert.assertTrue(metaData.getColumnCount() > 0);
+ int rowCnt = 0;
+ while (rs.next()) {
+ rowCnt++;
+ }
+ Assert.assertEquals(lines.length, rowCnt);
+ rs.close();
+ statement.close();
+ }
+
+ /**
+ * telnet insert compatible with opentsdb
+ *
+ * @throws SQLException execute error
+ */
+ @Test
+ public void telnetInsert() throws SQLException {
+ // given
+ String[] lines = new String[]{
+ "stb0_0 1626006833 4 host=host0 interface=eth0",
+ "stb0_1 1626006833 4 host=host0 interface=eth0",
+ "stb0_2 1626006833 4 host=host0 interface=eth0 id=\"special_name\"",
+ };
+
+ // when
+ try (Statement statement = conn.createStatement();
+ SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
+ schemalessStatement.executeSchemaless(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
+ }
+
+ // then
+ Statement statement = conn.createStatement();
+ ResultSet rs = statement.executeQuery("show tables");
+ Assert.assertNotNull(rs);
+ ResultSetMetaData metaData = rs.getMetaData();
+ Assert.assertTrue(metaData.getColumnCount() > 0);
+ int rowCnt = 0;
+ while (rs.next()) {
+ rowCnt++;
+ }
+ Assert.assertEquals(lines.length, rowCnt);
+ rs.close();
+ statement.close();
+ }
+
+ /**
+ * json insert compatible with opentsdb json format
+ *
+ * @throws SQLException execute error
+ */
+ @Test
+ public void jsonInsert() throws SQLException {
+ // given
+ String json = "[\n" +
+ " {\n" +
+ " \"metric\": \"cpu_load_1\",\n" +
+ " \"timestamp\": 1626006833,\n" +
+ " \"value\": 55.5,\n" +
+ " \"tags\": {\n" +
+ " \"host\": \"ubuntu\",\n" +
+ " \"interface\": \"eth1\",\n" +
+ " \"Id\": \"tb1\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
+ " \"metric\": \"cpu_load_2\",\n" +
+ " \"timestamp\": 1626006834,\n" +
+ " \"value\": 55.5,\n" +
+ " \"tags\": {\n" +
+ " \"host\": \"ubuntu\",\n" +
+ " \"interface\": \"eth2\",\n" +
+ " \"Id\": \"tb2\"\n" +
+ " }\n" +
+ " }\n" +
+ "]";
+
+ // when
+ try (Statement statement = conn.createStatement();
+ SchemalessStatement schemalessStatement = new SchemalessStatement(statement)) {
+ schemalessStatement.executeSchemaless(json, SchemalessProtocolType.JSON, SchemalessTimestampType.NOT_CONFIGURED);
+ }
+
+ // then
+ Statement statement = conn.createStatement();
+ ResultSet rs = statement.executeQuery("show tables");
+ Assert.assertNotNull(rs);
+ ResultSetMetaData metaData = rs.getMetaData();
+ Assert.assertTrue(metaData.getColumnCount() > 0);
+ int rowCnt = 0;
+ while (rs.next()) {
+ rowCnt++;
+ }
+
+ Assert.assertEquals(((JSONArray) JSONObject.parse(json)).size(), rowCnt);
+ rs.close();
+ statement.close();
+ }
+
+ @Before
+ public void before() {
+ String host = "127.0.0.1";
+ final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata";
+ try {
+ conn = DriverManager.getConnection(url);
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists " + dbname);
+ stmt.execute("create database if not exists " + dbname + " precision 'ns'");
+ stmt.execute("use " + dbname);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @After
+ public void after() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop database if exists " + dbname);
+ stmt.close();
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java
index 8be6ae6b1c566abcd7ec398e7df3f5308e29e1b1..f44d647595e99ae00a355ca25f702cf2e0c1cc36 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBJNIConnectorTest.java
@@ -1,5 +1,7 @@
package com.taosdata.jdbc;
+import com.taosdata.jdbc.enums.SchemalessProtocolType;
+import com.taosdata.jdbc.enums.SchemalessTimestampType;
import org.junit.Test;
import java.lang.management.ManagementFactory;
@@ -115,9 +117,10 @@ public class TSDBJNIConnectorTest {
}
// close statement
connector.executeQuery("use d");
- String[] lines = new String[]{"st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
- "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns"};
- connector.insertLines(lines);
+ String[] lines = new String[]{
+ "st,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+ "st,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000"};
+ connector.insertLines(lines, SchemalessProtocolType.LINE, SchemalessTimestampType.NANO_SECONDS);
// close connection
connector.closeConnection();
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..30fc2fa76597c30b905db5c9d49815189d71aaa3
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/HttpKeepAliveTest.java
@@ -0,0 +1,57 @@
+package com.taosdata.jdbc.rs;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class HttpKeepAliveTest {
+
+ private static final String host = "127.0.0.1";
+
+ @Test
+ public void test() throws SQLException {
+ //given
+ int multi = 4000;
+ AtomicInteger exceptionCount = new AtomicInteger();
+
+ //when
+ Properties props = new Properties();
+ props.setProperty("httpKeepAlive", "false");
+ props.setProperty("httpPoolSize", "20");
+ Connection connection = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata", props);
+
+ List threads = IntStream.range(0, multi).mapToObj(i -> new Thread(
+ () -> {
+ try (Statement stmt = connection.createStatement()) {
+ stmt.execute("insert into log.tb_not_exists values(now, 1)");
+ stmt.execute("select last(*) from log.dn");
+ } catch (SQLException throwables) {
+ exceptionCount.getAndIncrement();
+ }
+ }
+ )).collect(Collectors.toList());
+
+ threads.forEach(Thread::start);
+
+ for (Thread thread : threads) {
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ //then
+ Assert.assertEquals(multi, exceptionCount.get());
+ }
+
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java
index 21a91669b270df4dc2e8f7b4885fb9e8eedbfdf7..86b0f1be9e7ee99f50201dc98f197c07f5bb9aef 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java
@@ -660,7 +660,6 @@ public class RestfulResultSetTest {
@BeforeClass
public static void beforeClass() {
try {
- Class.forName("com.taosdata.jdbc.rs.RestfulDriver");
conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
stmt = conn.createStatement();
stmt.execute("create database if not exists restful_test");
@@ -670,7 +669,7 @@ public class RestfulResultSetTest {
stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')");
rs = stmt.executeQuery("select * from restful_test.weather");
rs.next();
- } catch (ClassNotFoundException | SQLException e) {
+ } catch (SQLException e) {
e.printStackTrace();
}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java
index 6b88de258dd4addda06cfb6e971b9d4dd267b7b4..98482ade80656f2e48bc6927953439cfe4b010c1 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java
@@ -518,7 +518,7 @@ public class SQLTest {
@Test
public void testCase050() {
- String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location";
+ String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts";
// when
ResultSet rs = executeQuery(connection, sql);
// then
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..a78284b7a2ecf1b43b96180fa9d819e89ecdc595
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/WasNullTest.java
@@ -0,0 +1,94 @@
+package com.taosdata.jdbc.rs;
+
+import org.junit.*;
+
+import java.sql.*;
+
+public class WasNullTest {
+
+ private static final String host = "127.0.0.1";
+ private Connection conn;
+
+
+ @Test
+ public void testGetTimestamp() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists weather");
+ stmt.execute("create table if not exists weather(f1 timestamp, f2 timestamp, f3 int)");
+ stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', NULL, 100)");
+
+ ResultSet rs = stmt.executeQuery("select * from restful_test.weather");
+ ResultSetMetaData meta = rs.getMetaData();
+ while (rs.next()) {
+ for (int i = 1; i <= meta.getColumnCount(); i++) {
+ if (i == 2) {
+ Object value = rs.getTimestamp(i);
+ boolean wasNull = rs.wasNull();
+ Assert.assertNull(value);
+ Assert.assertTrue(wasNull);
+ } else {
+ Object value = rs.getObject(i);
+ boolean wasNull = rs.wasNull();
+ Assert.assertNotNull(value);
+ Assert.assertFalse(wasNull);
+ }
+ }
+ }
+
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Test
+ public void testGetObject() {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("drop table if exists weather");
+ stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))");
+ stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, NULL, 10, 10, true, '涛思数据')");
+
+ ResultSet rs = stmt.executeQuery("select * from restful_test.weather");
+ ResultSetMetaData meta = rs.getMetaData();
+ while (rs.next()) {
+ for (int i = 1; i <= meta.getColumnCount(); i++) {
+ Object value = rs.getObject(i);
+ boolean wasNull = rs.wasNull();
+ if (i == 6) {
+ Assert.assertNull(value);
+ Assert.assertTrue(wasNull);
+ } else {
+ Assert.assertNotNull(value);
+ Assert.assertFalse(wasNull);
+ }
+ }
+ }
+
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Before
+ public void before() {
+ try {
+ conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/restful_test?user=root&password=taosdata");
+ Statement stmt = conn.createStatement();
+ stmt.execute("drop database if exists restful_test");
+ stmt.execute("create database if not exists restful_test");
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @After
+ public void after() {
+ try {
+ Statement statement = conn.createStatement();
+ statement.execute("drop database if exists restful_test");
+ if (conn != null)
+ conn.close();
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+}
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..7ba1607fdd32a594bca22528dee48d902736c703
--- /dev/null
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/HttpClientPoolUtilTest.java
@@ -0,0 +1,104 @@
+package com.taosdata.jdbc.utils;
+
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+import org.junit.Test;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+public class HttpClientPoolUtilTest {
+
+ String user = "root";
+ String password = "taosdata";
+ String host = "127.0.0.1";
+// String host = "master";
+
+ @Test
+ public void useLog() {
+ // given
+ int multi = 10;
+
+ // when
+ List threads = IntStream.range(0, multi).mapToObj(i -> new Thread(() -> {
+ try {
+ String token = login(multi);
+ executeOneSql("use log", token);
+ } catch (SQLException | UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+ })).collect(Collectors.toList());
+
+ threads.forEach(Thread::start);
+
+ for (Thread thread : threads) {
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Test
+ public void tableNotExist() {
+ // given
+ int multi = 20;
+
+ // when
+ List threads = IntStream.range(0, multi * 25).mapToObj(i -> new Thread(() -> {
+ try {
+// String token = "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04";
+ String token = login(multi);
+ executeOneSql("insert into log.tb_not_exist values(now, 1)", token);
+ executeOneSql("select last(*) from log.dn", token);
+ } catch (SQLException | UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+ })).collect(Collectors.toList());
+
+ threads.forEach(Thread::start);
+
+ for (Thread thread : threads) {
+ try {
+ thread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ private String login(int connPoolSize) throws SQLException, UnsupportedEncodingException {
+ user = URLEncoder.encode(user, StandardCharsets.UTF_8.displayName());
+ password = URLEncoder.encode(password, StandardCharsets.UTF_8.displayName());
+ String loginUrl = "http://" + host + ":" + 6041 + "/rest/login/" + user + "/" + password + "";
+ HttpClientPoolUtil.init(connPoolSize, false);
+ String result = HttpClientPoolUtil.execute(loginUrl);
+ JSONObject jsonResult = JSON.parseObject(result);
+ String status = jsonResult.getString("status");
+ String token = jsonResult.getString("desc");
+ if (!status.equals("succ")) {
+ throw new SQLException(jsonResult.getString("desc"));
+ }
+ return token;
+ }
+
+ private boolean executeOneSql(String sql, String token) throws SQLException {
+ String url = "http://" + host + ":6041/rest/sql";
+ String result = HttpClientPoolUtil.execute(url, sql, token);
+ JSONObject resultJson = JSON.parseObject(result);
+ if (resultJson.getString("status").equals("error")) {
+// HttpClientPoolUtil.reset();
+// throw TSDBError.createSQLException(resultJson.getInteger("code"), resultJson.getString("desc"));
+ return false;
+ }
+ return true;
+ }
+
+
+}
\ No newline at end of file
diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
index 1cbd95b2492284b9c85f31bd6b6848d9c223df18..66ecb9d63beb7c57ffb992a9ba5999b8fb70e739 100644
--- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
+++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/UtilsTest.java
@@ -73,6 +73,48 @@ public class UtilsTest {
Assert.assertEquals(expected, actual);
}
+ @Test
+ public void multiValuesAndWhitespace() {
+ // given
+ String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?) (?,?,?,?) (?,?,?,?)";
+ Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
+
+ // when
+ String actual = Utils.getNativeSql(nativeSql, parameters);
+
+ // then
+ String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4) (200,3.1415,'xyz',5) (300,3.141592,'uvw',6)";
+ Assert.assertEquals(expected, actual);
+ }
+
+ @Test
+ public void multiValuesNoSeparator() {
+ // given
+ String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?)(?,?,?,?)(?,?,?,?)";
+ Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
+
+ // when
+ String actual = Utils.getNativeSql(nativeSql, parameters);
+
+ // then
+ String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4)(200,3.1415,'xyz',5)(300,3.141592,'uvw',6)";
+ Assert.assertEquals(expected, actual);
+ }
+
+ @Test
+ public void multiValuesMultiSeparator() {
+ // given
+ String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?) (?,?,?,?), (?,?,?,?)";
+ Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
+
+ // when
+ String actual = Utils.getNativeSql(nativeSql, parameters);
+
+ // then
+ String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4) (200,3.1415,'xyz',5), (300,3.141592,'uvw',6)";
+ Assert.assertEquals(expected, actual);
+ }
+
@Test
public void lineTerminator() {
// given
@@ -100,6 +142,32 @@ public class UtilsTest {
Assert.assertEquals(expected, actual);
}
+ @Test
+ public void lineTerminatorAndMultiValuesAndNoneOrMoreWhitespace() {
+ String nativeSql = "INSERT Into ? TAGS(?) VALUES(?,?,\r\n?,?),(?,? ,\r\n?,?) t? tags (?) Values (?,?,?\r\n,?) (?,?,?,?) t? Tags(?) values (?,?,?,?) , (?,?,?,?)";
+ Object[] parameters = Stream.of("t1", "abc", 100, 1.1, "xxx", "xxx", 200, 2.2, "xxx", "xxx", 2, "bcd", 300, 3.3, "xxx", "xxx", 400, 4.4, "xxx", "xxx", 3, "cde", 500, 5.5, "xxx", "xxx", 600, 6.6, "xxx", "xxx").toArray();
+
+ // when
+ String actual = Utils.getNativeSql(nativeSql, parameters);
+
+ // then
+ String expected = "INSERT Into t1 TAGS('abc') VALUES(100,1.1,\r\n'xxx','xxx'),(200,2.2 ,\r\n'xxx','xxx') t2 tags ('bcd') Values (300,3.3,'xxx'\r\n,'xxx') (400,4.4,'xxx','xxx') t3 Tags('cde') values (500,5.5,'xxx','xxx') , (600,6.6,'xxx','xxx')";
+ Assert.assertEquals(expected, actual);
+ }
+
+ @Test
+ public void multiValuesAndNoneOrMoreWhitespace() {
+ String nativeSql = "INSERT INTO ? USING traces TAGS (?, ?) VALUES (?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?)";
+ Object[] parameters = Stream.of("t1", "t1", "t2", 1632968284000L, 111.111, 119.001, 0.4, 90, 99.1, "WGS84", 1632968285000L, 111.21109999999999, 120.001, 0.5, 91, 99.19999999999999, "WGS84").toArray();
+
+ // when
+ String actual = Utils.getNativeSql(nativeSql, parameters);
+
+ // then
+ String expected = "INSERT INTO t1 USING traces TAGS ('t1', 't2') VALUES (1632968284000, 111.111, 119.001, 0.4, 90, 99.1, 'WGS84') (1632968285000, 111.21109999999999, 120.001, 0.5, 91, 99.19999999999999, 'WGS84')";
+ Assert.assertEquals(expected, actual);
+ }
+
@Test
public void replaceNothing() {
// given
diff --git a/src/connector/jdbc/src/test/resources/commons-logging.properties b/src/connector/jdbc/src/test/resources/commons-logging.properties
new file mode 100644
index 0000000000000000000000000000000000000000..ac435a2a1bd64ca9925948d486b453638cb8caac
--- /dev/null
+++ b/src/connector/jdbc/src/test/resources/commons-logging.properties
@@ -0,0 +1,2 @@
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.SimpleLog
+
diff --git a/src/connector/jdbc/src/test/resources/simplelog.properties b/src/connector/jdbc/src/test/resources/simplelog.properties
new file mode 100644
index 0000000000000000000000000000000000000000..abcc1ef6d56112c892377ca47453b65ed924a9a9
--- /dev/null
+++ b/src/connector/jdbc/src/test/resources/simplelog.properties
@@ -0,0 +1,5 @@
+org.apache.commons.logging.simplelog.defaultlog=TRACE
+org.apache.commons.logging.simplelog.showlogname=true
+org.apache.commons.logging.simplelog.showShortLogname=restful
+org.apache.commons.logging.simplelog.showdatetime=true
+org.apache.commons.logging.simplelog.dateTimeFormat=yyyy-mm-dd hh:MM:ss.SSS
\ No newline at end of file
diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js
index 5ba2739c35b1f0aef61ba3e52ae5d2f3a901df77..3c395ec205a9c39b3c6e62532de536feef093544 100644
--- a/src/connector/nodejs/nodetaos/cinterface.js
+++ b/src/connector/nodejs/nodetaos/cinterface.js
@@ -12,6 +12,7 @@ const FieldTypes = require('./constants');
const errors = require('./error');
const TaosObjects = require('./taosobjects');
const { NULL_POINTER } = require('ref-napi');
+const { Console } = require('console');
module.exports = CTaosInterface;
@@ -53,6 +54,18 @@ function convertTinyint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0
}
return res;
}
+function convertTinyintUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readUIntLE(currOffset, 1);
+ res.push(d == FieldTypes.C_TINYINT_UNSIGNED_NULL ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+
function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@@ -64,6 +77,18 @@ function convertSmallint(data, num_of_rows, nbytes = 0, offset = 0, precision =
}
return res;
}
+function convertSmallintUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readUIntLE(currOffset, 2);
+ res.push(d == FieldTypes.C_SMALLINT_UNSIGNED_NULL ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+
function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@@ -75,6 +100,19 @@ function convertInt(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
}
return res;
}
+function convertIntUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readUInt32LE(currOffset);
+ res.push(d == FieldTypes.C_INT_UNSIGNED_NULL ? null : d);
+ currOffset += nbytes;
+ }
+ return res;
+}
+
+
function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@@ -86,6 +124,19 @@ function convertBigint(data, num_of_rows, nbytes = 0, offset = 0, precision = 0)
}
return res;
}
+function convertBigintUnsigned(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
+ data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
+ let res = [];
+ let currOffset = 0;
+ while (currOffset < data.length) {
+ let d = data.readUInt64LE(currOffset);
+ res.push(d == FieldTypes.C_BIGINT_UNSIGNED_NULL ? null : BigInt(d));
+ currOffset += nbytes;
+ }
+ return res;
+}
+
+
function convertFloat(data, num_of_rows, nbytes = 0, offset = 0, precision = 0) {
data = ref.reinterpret(data.deref(), nbytes * num_of_rows, offset);
let res = [];
@@ -156,7 +207,11 @@ let convertFunctions = {
[FieldTypes.C_DOUBLE]: convertDouble,
[FieldTypes.C_BINARY]: convertBinary,
[FieldTypes.C_TIMESTAMP]: convertTimestamp,
- [FieldTypes.C_NCHAR]: convertNchar
+ [FieldTypes.C_NCHAR]: convertNchar,
+ [FieldTypes.C_TINYINT_UNSIGNED]: convertTinyintUnsigned,
+ [FieldTypes.C_SMALLINT_UNSIGNED]: convertSmallintUnsigned,
+ [FieldTypes.C_INT_UNSIGNED]: convertIntUnsigned,
+ [FieldTypes.C_BIGINT_UNSIGNED]: convertBigintUnsigned
}
// Define TaosField structure
@@ -321,6 +376,7 @@ CTaosInterface.prototype.close = function close(connection) {
CTaosInterface.prototype.query = function query(connection, sql) {
return this.libtaos.taos_query(connection, ref.allocCString(sql));
}
+
CTaosInterface.prototype.affectedRows = function affectedRows(result) {
return this.libtaos.taos_affected_rows(result);
}
@@ -413,6 +469,7 @@ CTaosInterface.prototype.query_a = function query_a(connection, sql, callback, p
this.libtaos.taos_query_a(connection, ref.allocCString(sql), callback, param);
return param;
}
+
/** Asynchrnously fetches the next block of rows. Wraps callback and transfers a 4th argument to the cursor, the row data as blocks in javascript form
* Note: This isn't a recursive function, in order to fetch all data either use the TDengine cursor object, TaosQuery object, or implement a recrusive
* function yourself using the libtaos.taos_fetch_rows_a function
diff --git a/src/connector/nodejs/nodetaos/constants.js b/src/connector/nodejs/nodetaos/constants.js
index cd6a0c9fbaff51e7f0ecd3ab06907b7b1fb7dcb1..3a866315507371fdfc69efb6de550b7c21f660b7 100644
--- a/src/connector/nodejs/nodetaos/constants.js
+++ b/src/connector/nodejs/nodetaos/constants.js
@@ -36,13 +36,21 @@ module.exports = {
C_BINARY : 8,
C_TIMESTAMP : 9,
C_NCHAR : 10,
+ C_TINYINT_UNSIGNED : 11,
+ C_SMALLINT_UNSIGNED : 12,
+ C_INT_UNSIGNED : 13,
+ C_BIGINT_UNSIGNED : 14,
// NULL value definition
// NOTE: These values should change according to C definition in tsdb.h
C_BOOL_NULL : 2,
C_TINYINT_NULL : -128,
+ C_TINYINT_UNSIGNED_NULL : 255,
C_SMALLINT_NULL : -32768,
+ C_SMALLINT_UNSIGNED_NULL : 65535,
C_INT_NULL : -2147483648,
- C_BIGINT_NULL : -9223372036854775808,
+ C_INT_UNSIGNED_NULL : 4294967295,
+ C_BIGINT_NULL : -9223372036854775808n,
+ C_BIGINT_UNSIGNED_NULL : 18446744073709551615n,
C_FLOAT_NULL : 2146435072,
C_DOUBLE_NULL : -9223370937343148032,
C_NCHAR_NULL : 4294967295,
@@ -64,6 +72,10 @@ const typeCodesToName = {
8 : 'Binary',
9 : 'Timestamp',
10 : 'Nchar',
+ 11 : 'TINYINT_UNSIGNED',
+ 12 : 'SMALLINT_UNSIGNED',
+ 13 : 'INT_UNSIGNED',
+ 14 : 'BIGINT_UNSIGNED',
}
/**
diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json
index 6a2c66100b3d1921b3ce8997e70d33f024e5c3f2..711db94b84fab40d8d1809a44c45b24a9ab5bafb 100644
--- a/src/connector/nodejs/package.json
+++ b/src/connector/nodejs/package.json
@@ -7,7 +7,7 @@
"test": "test"
},
"scripts": {
- "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js"
+ "test": "node test/test.js && node test/testMicroseconds.js && node test/testNanoseconds.js && node test/testUnsignedType.js "
},
"repository": {
"type": "git",
diff --git a/src/connector/nodejs/test/test.js b/src/connector/nodejs/test/test.js
index caf05955da4c960ebedc872f400c17d18be767dd..06adf912a57bfa369b9567d0b5b3a1c8fb105ce8 100644
--- a/src/connector/nodejs/test/test.js
+++ b/src/connector/nodejs/test/test.js
@@ -90,7 +90,7 @@ c1.execute("create table if not exists td_connector_test.weather(ts timestamp, t
c1.execute("insert into t1 using weather tags('北京') values(now, 11.11, 11)");
c1.execute("insert into t1(ts, temperature) values(now, 22.22)");
c1.execute("insert into t1(ts, humidity) values(now, 33)");
-c1.query('select * from test.t1', true).then(function (result) {
+c1.query('select * from td_connector_test.t1', true).then(function (result) {
result.pretty();
});
diff --git a/src/connector/nodejs/test/testUnsignedType.js b/src/connector/nodejs/test/testUnsignedType.js
new file mode 100644
index 0000000000000000000000000000000000000000..82413afebad0b75116fe3ea46e50716843d87c84
--- /dev/null
+++ b/src/connector/nodejs/test/testUnsignedType.js
@@ -0,0 +1,26 @@
+const taos = require('../tdengine');
+var conn = taos.connect({ host: "127.0.0.1", user: "root", password: "taosdata", config: "/etc/taos", port: 10 });
+var c1 = conn.cursor();
+executeUpdate("create database nodedb;");
+executeUpdate("use nodedb;");
+executeUpdate("create table unsigntest(ts timestamp,ut tinyint unsigned,us smallint unsigned,ui int unsigned,ub bigint unsigned,bi bigint);");
+executeUpdate("insert into unsigntest values (now, 254,65534,4294967294,18446744073709551614,9223372036854775807);");
+executeUpdate("insert into unsigntest values (now, 0,0,0,0,-9223372036854775807);");
+executeQuery("select * from unsigntest;");
+executeUpdate("drop database nodedb;");
+
+
+function executeUpdate(sql) {
+ console.log(sql);
+ c1.execute(sql);
+}
+function executeQuery(sql) {
+ c1.execute(sql)
+ var data = c1.fetchall();
+ // Latest query's Field metadata is stored in cursor.fields
+ console.log(c1.fields);
+ // Latest query's result data is stored in cursor.data, also returned by fetchall.
+ console.log(c1.data);
+}
+setTimeout(()=>conn.close(),2000);
+
diff --git a/src/connector/python/README.md b/src/connector/python/README.md
index b5d841601f20fbad5bdc1464d5d83f512b25dfc4..679735131105739ae59940c29b51f57496a2057d 100644
--- a/src/connector/python/README.md
+++ b/src/connector/python/README.md
@@ -5,14 +5,27 @@
## Install
-```sh
-git clone --depth 1 https://github.com/taosdata/TDengine.git
-pip install ./TDengine/src/connector/python
+You can use `pip` to install the connector from PyPI:
+
+```bash
+pip install taospy
+```
+
+Or with git url:
+
+```bash
+pip install git+https://github.com/taosdata/taos-connector-python.git
+```
+
+If you have installed TDengine server or client with prebuilt packages, then you can install the connector from path:
+
+```bash
+pip install /usr/local/taos/connector/python
```
## Source Code
-[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/TDengine/tree/develop/src/connector/python).
+[TDengine](https://github.com/taosdata/TDengine) connector for Python source code is hosted on [GitHub](https://github.com/taosdata/taos-connector-python).
## Examples
diff --git a/src/connector/python/examples/insert-lines.py b/src/connector/python/examples/insert-lines.py
index 755050dfb52b180567dd80e87b63508fc4101172..1d20af7e9bcac23deb70c1dbd058bb86dd5585a5 100644
--- a/src/connector/python/examples/insert-lines.py
+++ b/src/connector/python/examples/insert-lines.py
@@ -1,4 +1,5 @@
import taos
+from taos import SmlProtocol, SmlPrecision
conn = taos.connect()
dbname = "pytest_line"
@@ -9,10 +10,10 @@ conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"pass",c2=false,c4=4f64 1626006833639000000',
]
-conn.schemaless_insert(lines, 0, "ns")
+conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
print("inserted")
-conn.schemaless_insert(lines, 0, "ns")
+conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
result = conn.query("show tables")
for row in result:
diff --git a/src/connector/python/pyproject.toml b/src/connector/python/pyproject.toml
index a8099199563a0e5957a7d69e75bab65cca6d17db..69e3351712b647712a88d7067545ea12ed86506d 100644
--- a/src/connector/python/pyproject.toml
+++ b/src/connector/python/pyproject.toml
@@ -1,10 +1,13 @@
[tool.poetry]
-name = "taos"
-version = "2.1.0"
+name = "taospy"
+version = "2.1.2"
description = "TDengine connector for python"
authors = ["Taosdata Inc. "]
license = "AGPL-3.0"
readme = "README.md"
+packages = [
+ {include = "taos"}
+]
[tool.poetry.dependencies]
python = "^2.7 || ^3.4"
@@ -12,12 +15,12 @@ typing = "*"
[tool.poetry.dev-dependencies]
pytest = [
- { version = "^4.6", python = "^2.7" },
- { version = "^6.2", python = "^3.7" }
+ { version = "^4.6", python = ">=2.7,<3.0" },
+ { version = "^6.2", python = ">=3.7,<4.0" }
]
pdoc = { version = "^7.1.1", python = "^3.7" }
mypy = { version = "^0.910", python = "^3.6" }
-black = { version = "^21.7b0", python = "^3.6" }
+black = [{ version = "^21.*", python = ">=3.6.2,<4.0" }]
[build-system]
requires = ["poetry-core>=1.0.0"]
diff --git a/src/connector/python/setup.py b/src/connector/python/setup.py
index b7e10001737bc40c04173ea4a65e95248965ffda..8f1dfafe4762e4a55a6d3e7c645c945a67a10f68 100644
--- a/src/connector/python/setup.py
+++ b/src/connector/python/setup.py
@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
- version="2.1.0",
+ version="2.1.1",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py
index ebbad68c5a8a148a601fb5ec48f9658a1920ed62..7ebfa8adef6a82c979ad0544a3eb11ccd351b760 100644
--- a/src/connector/python/taos/__init__.py
+++ b/src/connector/python/taos/__init__.py
@@ -440,19 +440,16 @@ from .cursor import *
from .result import *
from .statement import *
from .subscription import *
+from .schemaless import *
-try:
- import importlib.metadata
-
- __version__ = importlib.metadata.version("taos")
-except:
- None
+from taos._version import __version__
# Globals
threadsafety = 0
paramstyle = "pyformat"
__all__ = [
+ "__version__",
# functions
"connect",
"new_bind_param",
@@ -468,6 +465,8 @@ __all__ = [
"TaosRow",
"TaosStmt",
"PrecisionEnum",
+ "SmlPrecision",
+ "SmlProtocol"
]
def connect(*args, **kwargs):
diff --git a/src/connector/python/taos/_version.py b/src/connector/python/taos/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..f811561263c557cf534e90ff763373bccacb20b6
--- /dev/null
+++ b/src/connector/python/taos/_version.py
@@ -0,0 +1 @@
+__version__ = '2.1.2'
diff --git a/src/connector/python/taos/cinterface.py b/src/connector/python/taos/cinterface.py
index 1223b4544899dd83d3f1ea1a519def035de8ebcf..37bc90d4c63fe3f75b12d46bb1bf535441869938 100644
--- a/src/connector/python/taos/cinterface.py
+++ b/src/connector/python/taos/cinterface.py
@@ -2,8 +2,9 @@
import ctypes
import platform
-import sys
+import inspect
from ctypes import *
+
try:
from typing import Any
except:
@@ -12,7 +13,9 @@ except:
from .error import *
from .bind import *
from .field import *
+from .schemaless import *
+_UNSUPPORTED = {}
# stream callback
stream_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
@@ -46,10 +49,13 @@ def _load_taos():
"Darwin": _load_taos_darwin,
"Windows": _load_taos_windows,
}
+ pf = platform.system()
+ if load_func[pf] is None:
+ raise InterfaceError("unsupported platform: %s" % pf)
try:
- return load_func[platform.system()]()
- except:
- raise InterfaceError('unsupported platform or failed to load taos client library')
+ return load_func[pf]()
+ except Exception as err:
+ raise InterfaceError("unable to load taos C library: %s" % err)
_libtaos = _load_taos()
@@ -64,6 +70,7 @@ _libtaos.taos_consume.restype = ctypes.c_void_p
_libtaos.taos_fetch_lengths.restype = ctypes.POINTER(ctypes.c_int)
_libtaos.taos_free_result.restype = None
_libtaos.taos_query.restype = ctypes.POINTER(ctypes.c_void_p)
+
try:
_libtaos.taos_stmt_errstr.restype = c_char_p
except AttributeError:
@@ -178,6 +185,7 @@ def taos_connect(host=None, user="root", password="taosdata", db=None, port=0):
raise ConnectionError("connect to TDengine failed")
return connection
+
_libtaos.taos_connect_auth.restype = c_void_p
_libtaos.taos_connect_auth.argtypes = c_char_p, c_char_p, c_char_p, c_char_p, c_uint16
@@ -233,6 +241,7 @@ def taos_connect_auth(host=None, user="root", auth="", db=None, port=0):
raise ConnectionError("connect to TDengine failed")
return connection
+
_libtaos.taos_query.restype = c_void_p
_libtaos.taos_query.argtypes = c_void_p, c_char_p
@@ -284,6 +293,7 @@ def taos_affected_rows(result):
"""The affected rows after runing query"""
return _libtaos.taos_affected_rows(result)
+
subscribe_callback_type = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_int)
_libtaos.taos_subscribe.restype = c_void_p
# _libtaos.taos_subscribe.argtypes = c_void_p, c_int, c_char_p, c_char_p, subscribe_callback_type, c_void_p, c_int
@@ -314,7 +324,7 @@ def taos_subscribe(connection, restart, topic, sql, interval, callback=None, par
_libtaos.taos_consume.restype = c_void_p
-_libtaos.taos_consume.argstype = c_void_p,
+_libtaos.taos_consume.argstype = (c_void_p,)
def taos_consume(sub):
@@ -500,13 +510,17 @@ def taos_stop_query(result):
return _libtaos.taos_stop_query(result)
-_libtaos.taos_load_table_info.restype = c_int
-_libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p)
+try:
+ _libtaos.taos_load_table_info.restype = c_int
+ _libtaos.taos_load_table_info.argstype = (c_void_p, c_char_p)
+except Exception as err:
+ _UNSUPPORTED["taos_open_stream"] = err
def taos_load_table_info(connection, tables):
# type: (ctypes.c_void_p, str) -> None
"""Stop current query"""
+ _check_if_supported()
errno = _libtaos.taos_load_table_info(connection, c_char_p(tables.encode("utf-8")))
if errno != 0:
msg = taos_errstr()
@@ -559,12 +573,13 @@ def taos_select_db(connection, db):
try:
_libtaos.taos_open_stream.restype = c_void_p
_libtaos.taos_open_stream.argstype = c_void_p, c_char_p, stream_callback_type, c_int64, c_void_p, Any
-except:
- pass
+except Exception as err:
+ _UNSUPPORTED["taos_open_stream"] = err
def taos_open_stream(connection, sql, callback, stime=0, param=None, callback2=None):
# type: (ctypes.c_void_p, str, stream_callback_type, c_int64, c_void_p, c_void_p) -> ctypes.pointer
+ _check_if_supported()
if callback2 != None:
callback2 = stream_callback2_type(callback2)
"""Open an stream"""
@@ -597,6 +612,7 @@ def taos_stmt_init(connection):
"""
return c_void_p(_libtaos.taos_stmt_init(connection))
+
_libtaos.taos_stmt_prepare.restype = c_int
_libtaos.taos_stmt_prepare.argstype = (c_void_p, c_char_p, c_int)
@@ -615,6 +631,7 @@ def taos_stmt_prepare(stmt, sql):
_libtaos.taos_stmt_close.restype = c_int
_libtaos.taos_stmt_close.argstype = (c_void_p,)
+
def taos_stmt_close(stmt):
# type: (ctypes.c_void_p) -> None
"""Close a statement query
@@ -624,17 +641,12 @@ def taos_stmt_close(stmt):
if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
-try:
- _libtaos.taos_stmt_errstr.restype = c_char_p
- _libtaos.taos_stmt_errstr.argstype = (c_void_p,)
-except AttributeError:
- print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info())
try:
_libtaos.taos_stmt_errstr.restype = c_char_p
_libtaos.taos_stmt_errstr.argstype = (c_void_p,)
-except AttributeError:
- print("WARNING: libtaos(%s) does not support taos_stmt_errstr" % taos_get_client_info())
+except Exception as err:
+ _UNSUPPORTED["taos_stmt_set_tbname"] = err
def taos_stmt_errstr(stmt):
@@ -642,16 +654,17 @@ def taos_stmt_errstr(stmt):
"""Get error message from stetement query
@stmt: c_void_p TAOS_STMT*
"""
+ _check_if_supported()
err = c_char_p(_libtaos.taos_stmt_errstr(stmt))
if err:
return err.value.decode("utf-8")
+
try:
_libtaos.taos_stmt_set_tbname.restype = c_int
_libtaos.taos_stmt_set_tbname.argstype = (c_void_p, c_char_p)
-except AttributeError:
- print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname" % taos_get_client_info())
-
+except Exception as err:
+ _UNSUPPORTED["taos_stmt_set_tbname"] = err
def taos_stmt_set_tbname(stmt, name):
@@ -659,15 +672,17 @@ def taos_stmt_set_tbname(stmt, name):
"""Set table name of a statement query if exists.
@stmt: c_void_p TAOS_STMT*
"""
+ _check_if_supported()
res = _libtaos.taos_stmt_set_tbname(stmt, c_char_p(name.encode("utf-8")))
if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
try:
_libtaos.taos_stmt_set_tbname_tags.restype = c_int
_libtaos.taos_stmt_set_tbname_tags.argstype = (c_void_p, c_char_p, c_void_p)
-except AttributeError:
- print("WARNING: libtaos(%s) does not support taos_stmt_set_tbname_tags" % taos_get_client_info())
+except Exception as err:
+ _UNSUPPORTED["taos_stmt_set_tbname_tags"] = err
def taos_stmt_set_tbname_tags(stmt, name, tags):
@@ -675,11 +690,13 @@ def taos_stmt_set_tbname_tags(stmt, name, tags):
"""Set table name with tags bind params.
@stmt: c_void_p TAOS_STMT*
"""
+ _check_if_supported()
res = _libtaos.taos_stmt_set_tbname_tags(stmt, ctypes.c_char_p(name.encode("utf-8")), tags)
if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
_libtaos.taos_stmt_is_insert.restype = c_int
_libtaos.taos_stmt_is_insert.argstype = (c_void_p, POINTER(c_int))
@@ -699,6 +716,7 @@ def taos_stmt_is_insert(stmt):
_libtaos.taos_stmt_num_params.restype = c_int
_libtaos.taos_stmt_num_params.argstype = (c_void_p, POINTER(c_int))
+
def taos_stmt_num_params(stmt):
# type: (ctypes.c_void_p) -> int
"""Params number of the current statement query.
@@ -710,6 +728,7 @@ def taos_stmt_num_params(stmt):
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
return num_params.value
+
_libtaos.taos_stmt_bind_param.restype = c_int
_libtaos.taos_stmt_bind_param.argstype = (c_void_p, c_void_p)
@@ -726,12 +745,12 @@ def taos_stmt_bind_param(stmt, bind):
if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
try:
_libtaos.taos_stmt_bind_param_batch.restype = c_int
_libtaos.taos_stmt_bind_param_batch.argstype = (c_void_p, c_void_p)
-except AttributeError:
- print("WARNING: libtaos(%s) does not support taos_stmt_bind_param_batch" % taos_get_client_info())
-
+except Exception as err:
+ _UNSUPPORTED["taos_stmt_bind_param_batch"] = err
def taos_stmt_bind_param_batch(stmt, bind):
@@ -742,15 +761,17 @@ def taos_stmt_bind_param_batch(stmt, bind):
"""
# ptr = ctypes.cast(bind, POINTER(TaosMultiBind))
# ptr = pointer(bind)
+ _check_if_supported()
res = _libtaos.taos_stmt_bind_param_batch(stmt, bind)
if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
+
try:
_libtaos.taos_stmt_bind_single_param_batch.restype = c_int
_libtaos.taos_stmt_bind_single_param_batch.argstype = (c_void_p, c_void_p, c_int)
-except AttributeError:
- print("WARNING: libtaos(%s) does not support taos_stmt_bind_single_param_batch" % taos_get_client_info())
+except Exception as err:
+ _UNSUPPORTED["taos_stmt_bind_single_param_batch"] = err
def taos_stmt_bind_single_param_batch(stmt, bind, col):
@@ -760,6 +781,7 @@ def taos_stmt_bind_single_param_batch(stmt, bind, col):
@bind: TAOS_MULTI_BIND*
@col: column index
"""
+ _check_if_supported()
res = _libtaos.taos_stmt_bind_single_param_batch(stmt, bind, col)
if res != 0:
raise StatementError(msg=taos_stmt_errstr(stmt), errno=res)
@@ -807,31 +829,43 @@ def taos_stmt_use_result(stmt):
raise StatementError(taos_stmt_errstr(stmt))
return result
-try:
- _libtaos.taos_insert_lines.restype = c_int
- _libtaos.taos_insert_lines.argstype = c_void_p, c_void_p, c_int
-except AttributeError:
- print("WARNING: libtaos(%s) does not support insert_lines" % taos_get_client_info())
-
+try:
+ _libtaos.taos_schemaless_insert.restype = c_void_p
+ _libtaos.taos_schemaless_insert.argstype = c_void_p, c_void_p, c_int, c_int, c_int
+except Exception as err:
+ _UNSUPPORTED["taos_schemaless_insert"] = err
def taos_schemaless_insert(connection, lines, protocol, precision):
- # type: (c_void_p, list[str] | tuple(str)) -> None
+ # type: (c_void_p, list[str] | tuple(str), SmlProtocol, SmlPrecision) -> int
+ _check_if_supported()
num_of_lines = len(lines)
lines = (c_char_p(line.encode("utf-8")) for line in lines)
lines_type = ctypes.c_char_p * num_of_lines
p_lines = lines_type(*lines)
res = c_void_p(_libtaos.taos_schemaless_insert(connection, p_lines, num_of_lines, protocol, precision))
errno = taos_errno(res)
+ affected_rows = taos_affected_rows(res)
if errno != 0:
errstr = taos_errstr(res)
taos_free_result(res)
- print("schemaless_insert error affected rows: {}".format(taos_affected_rows(res)))
- raise SchemalessError(errstr, errno)
+ raise SchemalessError(errstr, errno, affected_rows)
taos_free_result(res)
- return errno
+ return affected_rows
+
+
+def _check_if_supported():
+ func = inspect.stack()[1][3]
+ if func in _UNSUPPORTED:
+ raise InterfaceError("C function %s is not supported in v%s: %s" % (func, taos_get_client_info(), _UNSUPPORTED[func]))
+
+
+def unsupported_methods():
+ for m, e in range(_UNSUPPORTED):
+ print("unsupported %s: %s", m, e)
+
class CTaosInterface(object):
def __init__(self, config=None):
diff --git a/src/connector/python/taos/connection.py b/src/connector/python/taos/connection.py
index dfac42f244d19267124c5ea790d4503e28fd5a78..dc8225ab33c84930214eb8f0d8ba47f6f31a5adf 100644
--- a/src/connector/python/taos/connection.py
+++ b/src/connector/python/taos/connection.py
@@ -72,10 +72,9 @@ class TaosConnection(object):
taos_select_db(self._conn, database)
def execute(self, sql):
- # type: (str) -> None
+ # type: (str) -> int
"""Simplely execute sql ignoring the results"""
- res = taos_query(self._conn, sql)
- taos_free_result(res)
+ return self.query(sql).affected_rows
def query(self, sql):
# type: (str) -> TaosResult
@@ -118,7 +117,7 @@ class TaosConnection(object):
return TaosStream(stream)
def schemaless_insert(self, lines, protocol, precision):
- # type: (list[str]) -> None
+ # type: (list[str], SmlProtocol, SmlPrecision) -> int
"""
1.Line protocol and schemaless support
@@ -171,6 +170,7 @@ class TaosConnection(object):
conn.schemaless_insert(lines, 2, None)
"""
+ print(lines, protocol, precision)
return taos_schemaless_insert(self._conn, lines, protocol, precision)
diff --git a/src/connector/python/taos/error.py b/src/connector/python/taos/error.py
index 723f6f1a2db1249a3773538b4bfa6d51595a005d..122466fe3c448ec551fb910c402ad14bb6c93336 100644
--- a/src/connector/python/taos/error.py
+++ b/src/connector/python/taos/error.py
@@ -83,7 +83,16 @@ class ResultError(DatabaseError):
class SchemalessError(DatabaseError):
"""taos_schemaless_insert errors."""
- pass
+ def __init__(self, msg=None, errno=0xffff, affected_rows=0):
+ DatabaseError.__init__(self, msg, errno)
+ self.affected_rows = affected_rows
+
+ def __str__(self):
+ return self._full_msg + "(affected rows: %d)" % self.affected_rows
+
+ # @property
+ # def affected_rows(self):
+ # return self.affected_rows
class StatementError(DatabaseError):
diff --git a/src/connector/python/taos/result.py b/src/connector/python/taos/result.py
index 81151733615d1b7fdc3318b6e53888ae39d32b14..c9feb4d6502515cc6e3e2d4be688f2e7fcd895b2 100644
--- a/src/connector/python/taos/result.py
+++ b/src/connector/python/taos/result.py
@@ -123,6 +123,12 @@ class TaosResult(object):
for i in range(len(self._fields)):
buffer[i].extend(block[i])
return list(map(tuple, zip(*buffer)))
+
+ def fetch_all_into_dict(self):
+ """Fetch all rows and convert it to dict"""
+ names = [field.name for field in self.fields]
+ rows = self.fetch_all()
+ return list(dict(zip(names, row)) for row in rows)
def fetch_rows_a(self, callback, param):
taos_fetch_rows_a(self._result, callback, param)
@@ -228,6 +234,12 @@ class TaosRow:
blocks[i] = CONVERT_FUNC[fields[i].type](data, 1, field_lens[i], precision)[0]
return tuple(blocks)
+ def as_dict(self):
+ values = self.as_tuple()
+ names = self._result.fields
+ dict(zip(names, values))
+
+
class TaosBlocks:
"""TDengine result blocks iterator"""
diff --git a/src/connector/python/taos/schemaless.py b/src/connector/python/taos/schemaless.py
new file mode 100644
index 0000000000000000000000000000000000000000..35967412f78a63e67d63f0e58bbf903f21fb275a
--- /dev/null
+++ b/src/connector/python/taos/schemaless.py
@@ -0,0 +1,17 @@
+
+class SmlPrecision:
+ """Schemaless timestamp precision constants"""
+ NOT_CONFIGURED = 0 # C.TSDB_SML_TIMESTAMP_NOT_CONFIGURED
+ HOURS = 1
+ MINUTES = 2
+ SECONDS = 3
+ MILLI_SECONDS = 4
+ MICRO_SECONDS = 5
+ NANO_SECONDS = 6
+
+class SmlProtocol:
+ """Schemaless protocol constants"""
+ UNKNOWN_PROTOCOL = 0
+ LINE_PROTOCOL = 1
+ TELNET_PROTOCOL = 2
+ JSON_PROTOCOL = 3
\ No newline at end of file
diff --git a/src/connector/python/tests/test_lines.py b/src/connector/python/tests/test_lines.py
index 157580f8466ce765246184421f0756958455a54b..51d23b8e891d398b404086fdb2ff2910dcc1eb0a 100644
--- a/src/connector/python/tests/test_lines.py
+++ b/src/connector/python/tests/test_lines.py
@@ -1,4 +1,4 @@
-from taos.error import OperationalError
+from taos.error import OperationalError, SchemalessError
from taos import connect, new_bind_params, PrecisionEnum
from taos import *
@@ -13,35 +13,95 @@ def conn():
return connect()
+def test_schemaless_insert_update_2(conn):
+ # type: (TaosConnection) -> None
+
+ dbname = "test_schemaless_insert_update_2"
+ try:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
+ conn.select_db(dbname)
+
+ lines = [
+ 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000',
+ ]
+ res = conn.schemaless_insert(lines, 1, 0)
+ print("affected rows: ", res)
+ assert(res == 1)
+
+ result = conn.query("select * from st")
+ [before] = result.fetch_all_into_dict()
+ assert(before["c3"] == "passitagin, abc")
+
+ lines = [
+ 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000',
+ ]
+ res = conn.schemaless_insert(lines, 1, 0)
+ result = conn.query("select * from st")
+ [after] = result.fetch_all_into_dict()
+ assert(after["c3"] == "passitagin")
+
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+
+ except Exception as err:
+ conn.execute("drop database if exists %s" % dbname)
+ conn.close()
+ print(err)
+ raise err
+
def test_schemaless_insert(conn):
# type: (TaosConnection) -> None
dbname = "pytest_taos_schemaless_insert"
try:
conn.execute("drop database if exists %s" % dbname)
- conn.execute("create database if not exists %s precision 'us'" % dbname)
+ conn.execute("create database if not exists %s update 2 precision 'ns'" % dbname)
conn.select_db(dbname)
lines = [
'st,t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000',
- 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000',
+ 'st,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin, abc",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000',
'stf,t1=4i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000',
]
- conn.schemaless_insert(lines, 0, "ns")
- print("inserted")
+ res = conn.schemaless_insert(lines, 1, 0)
+ print("affected rows: ", res)
+ assert(res == 3)
lines = [
'stf,t1=5i64,t3="t4",t2=5f64,t4=5f64 c1=3i64,c3=L"passitagin_stf",c2=false,c5=5f64,c6=7u64 1626006933641000000',
]
- conn.schemaless_insert(lines, 0, "ns")
- print("inserted")
+ res = conn.schemaless_insert(lines, 1, 0)
+ print("affected rows: ", res)
+ assert(res == 1)
+ result = conn.query("select * from st")
+
+ dict2 = result.fetch_all_into_dict()
+ print(dict2)
+ result.row_count
+ all = result.rows_iter()
+ for row in all:
+ print(row)
+ result.close()
+ assert(result.row_count == 2)
+
+ # error test
+ lines = [
+ ',t1=3i64,t2=4f64,t3="t3" c1=3i64,c3=L"passit",c2=false,c4=4f64 1626006833639000000',
+ ]
+ try:
+ res = conn.schemaless_insert(lines, 1, 0)
+ print(res)
+ # assert(False)
+ except SchemalessError as err:
+ pass
+
result = conn.query("select * from st")
- print(*result.fields)
+ result.row_count
all = result.rows_iter()
for row in all:
print(row)
result.close()
- print(result.row_count)
conn.execute("drop database if exists %s" % dbname)
conn.close()
@@ -55,3 +115,4 @@ def test_schemaless_insert(conn):
if __name__ == "__main__":
test_schemaless_insert(connect())
+ test_schemaless_insert_update_2(connect())
diff --git a/src/connector/python/tests/test_stmt.py b/src/connector/python/tests/test_stmt.py
index 938ba10eb3d2377a63f7972deb99dbd47f7de1b2..3368ecb6a9336a4295790f2cd55314ac9bb6290e 100644
--- a/src/connector/python/tests/test_stmt.py
+++ b/src/connector/python/tests/test_stmt.py
@@ -1,3 +1,4 @@
+# encoding:UTF-8
from taos import *
from ctypes import *
diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt
index 0ac2f4d6876259d826f62cbd7dc5fa2d30b6553b..d9c4a84234184b14d272854838625e023dd55cea 100644
--- a/src/dnode/CMakeLists.txt
+++ b/src/dnode/CMakeLists.txt
@@ -39,6 +39,10 @@ IF (TD_GRANT)
TARGET_LINK_LIBRARIES(taosd grant)
ENDIF ()
+IF (TD_USB_DONGLE)
+ TARGET_LINK_LIBRARIES(taosd usb_dongle)
+ENDIF ()
+
IF (TD_MQTT)
TARGET_LINK_LIBRARIES(taosd mqtt)
ENDIF ()
diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c
index c6f6e976f65e66360e750ebdb5e056d79c8923a8..0dc10385cfbf648ce90f3dabaf87c9a2ea189b6f 100644
--- a/src/dnode/src/dnodeMain.c
+++ b/src/dnode/src/dnodeMain.c
@@ -23,6 +23,7 @@
#include "twal.h"
#include "tfs.h"
#include "tsync.h"
+#include "tgrant.h"
#include "dnodeStep.h"
#include "dnodePeer.h"
#include "dnodeModule.h"
@@ -88,6 +89,7 @@ static SStep tsDnodeSteps[] = {
{"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer},
{"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry},
{"dnode-script", scriptEnvPoolInit, scriptEnvPoolCleanup},
+ {"dnode-grant", grantInit, grantCleanUp},
};
static SStep tsDnodeCompactSteps[] = {
diff --git a/src/inc/taos.h b/src/inc/taos.h
index 4afec942ff991ce1009cb8c54113562f93f9c92d..6cd62d3177d2490c5c89bf910e258c956c2f69fc 100644
--- a/src/inc/taos.h
+++ b/src/inc/taos.h
@@ -175,11 +175,13 @@ DLL_EXPORT int taos_select_db(TAOS *taos, const char *db);
DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields);
DLL_EXPORT void taos_stop_query(TAOS_RES *res);
DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col);
+DLL_EXPORT bool taos_is_update_query(TAOS_RES *res);
DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows);
-DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
-
DLL_EXPORT int* taos_fetch_lengths(TAOS_RES *res);
+DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql);
+DLL_EXPORT void taos_reset_current_db(TAOS *taos);
+
// TAOS_RES *taos_list_tables(TAOS *mysql, const char *wild);
// TAOS_RES *taos_list_dbs(TAOS *mysql, const char *wild);
@@ -192,7 +194,6 @@ DLL_EXPORT int taos_errno(TAOS_RES *tres);
DLL_EXPORT void taos_query_a(TAOS *taos, const char *sql, void (*fp)(void *param, TAOS_RES *, int code), void *param);
DLL_EXPORT void taos_fetch_rows_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, int numOfRows), void *param);
-//DLL_EXPORT void taos_fetch_row_a(TAOS_RES *res, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), void *param);
typedef void (*TAOS_SUBSCRIBE_CALLBACK)(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code);
DLL_EXPORT TAOS_SUB *taos_subscribe(TAOS* taos, int restart, const char* topic, const char *sql, TAOS_SUBSCRIBE_CALLBACK fp, void *param, int interval);
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index d7848937b137c2e458c567099e3df0e386eb92fa..9d48ed59cecfffe1ea36971fa502ed9dae3fb0bc 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -99,6 +99,7 @@ extern const int32_t TYPE_BYTES[15];
#define TS_PATH_DELIMITER "."
#define TS_ESCAPE_CHAR '`'
+#define TS_ESCAPE_CHAR_SIZE 2
#define TSDB_TIME_PRECISION_MILLI 0
#define TSDB_TIME_PRECISION_MICRO 1
@@ -294,7 +295,7 @@ do { \
#define TSDB_DEFAULT_DAYS_PER_FILE 10
#define TSDB_MIN_KEEP 1 // data in db to be reserved.
-#define TSDB_MAX_KEEP 365000 // data in db to be reserved.
+#define TSDB_MAX_KEEP 36500 // data in db to be reserved.
#define TSDB_DEFAULT_KEEP 3650 // ten years
#define TSDB_DEFAULT_MIN_ROW_FBLOCK 100
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index dfdd016bb66244394310e4c34e689c3428d8914b..c6d587fe1a296bc40ab804cdef160b70da273fd8 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -400,7 +400,7 @@ typedef struct SColIndex {
int16_t colId; // column id
int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
uint16_t flag; // denote if it is a tag or a normal column
- char name[TSDB_COL_NAME_LEN + TSDB_DB_NAME_LEN + 1];
+ char name[TSDB_COL_NAME_LEN + TSDB_TABLE_NAME_LEN + 1];
} SColIndex;
typedef struct SColumnFilterInfo {
diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h
index ad7eaef8cbda7a52f9e2340969d7ab95791d127d..9d82245c2199b5fa0b62d709a08633e5a976b007 100644
--- a/src/inc/tsdb.h
+++ b/src/inc/tsdb.h
@@ -421,9 +421,6 @@ bool tsdbNoProblem(STsdbRepo* pRepo);
// unit of walSize: MB
int tsdbCheckWal(STsdbRepo *pRepo, uint32_t walSize);
-// not commit if other instances in committing state or waiting to commit
-bool tsdbIsNeedCommit(STsdbRepo *pRepo);
-
#ifdef __cplusplus
}
#endif
diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h
index 5840aaaa37274b110aa77218a0b4f9c388a1175b..b38dcb0871d7bac99af891c51671e82a68528470 100644
--- a/src/inc/ttokendef.h
+++ b/src/inc/ttokendef.h
@@ -212,6 +212,7 @@
#define TK_INSERT 194
#define TK_INTO 195
#define TK_VALUES 196
+#define TK_FILE 197
@@ -223,7 +224,6 @@
#define TK_HEX 303 // hex number 0x123
#define TK_OCT 304 // oct number
#define TK_BIN 305 // bin format data 0b111
-#define TK_FILE 306
#define TK_QUESTION 307 // denoting the placeholder of "?",when invoking statement bind query
#endif
diff --git a/src/kit/shell/CMakeLists.txt b/src/kit/shell/CMakeLists.txt
index c3929f77a38a4ba31eb857f2a40e6ff46b6444df..d69a267707470e7a5df4edfa85764aae580a13a6 100644
--- a/src/kit/shell/CMakeLists.txt
+++ b/src/kit/shell/CMakeLists.txt
@@ -26,6 +26,8 @@ ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
ELSEIF (TD_WINDOWS)
+ ADD_DEFINITIONS(-DUNICODE)
+ ADD_DEFINITIONS(-D_UNICODE)
LIST(APPEND SRC ./src/shellEngine.c)
LIST(APPEND SRC ./src/shellMain.c)
LIST(APPEND SRC ./src/shellWindows.c)
diff --git a/src/kit/shell/src/shellImport.c b/src/kit/shell/src/shellImport.c
index 38abb423cfd2c0329dad24244a798f0617b4cbb6..b3a07b257cbfdd639d6834e7981fb10e89e43512 100644
--- a/src/kit/shell/src/shellImport.c
+++ b/src/kit/shell/src/shellImport.c
@@ -25,7 +25,7 @@
static char **shellSQLFiles = NULL;
static int32_t shellSQLFileNum = 0;
-static char shellTablesSQLFile[TSDB_FILENAME_LEN] = {0};
+static char shellTablesSQLFile[4096] = {0};
typedef struct {
pthread_t threadID;
diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c
index 5c9dc0995dacecebd10b7f2b77e216ca97157db0..afed5d2d2ffa680852c1155334499975cd58cfea 100644
--- a/src/kit/shell/src/shellMain.c
+++ b/src/kit/shell/src/shellMain.c
@@ -95,6 +95,9 @@ SShellArguments args = {
*/
int main(int argc, char* argv[]) {
/*setlocale(LC_ALL, "en_US.UTF-8"); */
+#ifdef WINDOWS
+ SetConsoleOutputCP(CP_UTF8);
+#endif
if (!checkVersion()) {
exit(EXIT_FAILURE);
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index cb707d9331d3f1f87227e5096b6d7f047d350ebf..0301fe6df2a6a1fbf8a75507193dfacb55385895 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -272,13 +272,16 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE);
// Read input.
- char c;
+ void *console = GetStdHandle(STD_INPUT_HANDLE);
+ unsigned long read;
+ wchar_t c;
+ char mbStr[16];
while (1) {
- c = getchar();
-
+ int ret = ReadConsole(console, &c, 1, &read, NULL);
+ int size = WideCharToMultiByte(CP_UTF8, 0, &c, read, mbStr, sizeof(mbStr), NULL, NULL);
+ mbStr[size] = 0;
switch (c) {
case '\n':
- case '\r':
if (isReadyGo(&cmd)) {
sprintf(command, "%s%s", cmd.buffer, cmd.command);
free(cmd.buffer);
@@ -291,8 +294,12 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
updateBuffer(&cmd);
}
break;
+ case '\r':
+ break;
default:
- insertChar(&cmd, c);
+ for (int i = 0; i < size; ++i) {
+ insertChar(&cmd, mbStr[i]);
+ }
}
}
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 761593f6768d4f5017e9fd48b13ec7eb58a948ec..c2b8479f19d778f030101a8d9fb5ac537ca0475c 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -77,6 +77,7 @@ extern char configDir[];
#define MAX_DATA_SIZE (16*TSDB_MAX_COLUMNS)+20 // max record len: 16*MAX_COLUMNS, timestamp string and ,('') need extra space
#define OPT_ABORT 1 /* –abort */
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255.
+#define MAX_PATH_LEN 4096
#define DEFAULT_START_TIME 1500000000000
@@ -511,7 +512,7 @@ typedef struct SThreadInfo_S {
int threadID;
char db_name[TSDB_DB_NAME_LEN];
uint32_t time_precision;
- char filePath[TSDB_FILENAME_LEN];
+ char filePath[MAX_PATH_LEN];
FILE *fp;
char tb_prefix[TSDB_TABLE_NAME_LEN];
uint64_t start_table_from;
@@ -3481,8 +3482,14 @@ static int postProceSql(char *host, uint16_t port,
'w', 'x', 'y', 'z', '0', '1', '2', '3',
'4', '5', '6', '7', '8', '9', '+', '/'};
- snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
- g_Dbs.user, g_Dbs.password);
+ if (g_args.test_mode == INSERT_TEST) {
+ snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
+ g_Dbs.user, g_Dbs.password);
+ } else {
+ snprintf(userpass_buf, INPUT_BUF_LEN, "%s:%s",
+ g_queryInfo.user, g_queryInfo.password);
+ }
+
size_t userpass_buf_len = strlen(userpass_buf);
size_t encoded_len = 4 * ((userpass_buf_len +2) / 3);
@@ -3547,7 +3554,7 @@ static int postProceSql(char *host, uint16_t port,
do {
#ifdef WINDOWS
- bytes = recv(pThreadInfo->sockfds, response_buf + received, resp_len - received, 0);
+ bytes = recv(pThreadInfo->sockfd, response_buf + received, resp_len - received, 0);
#else
bytes = read(pThreadInfo->sockfd, response_buf + received, resp_len - received);
#endif
@@ -3560,8 +3567,10 @@ static int postProceSql(char *host, uint16_t port,
break;
received += bytes;
- response_buf[RESP_BUF_LEN - 1] = '\0';
+ verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
+ __func__, __LINE__, received, resp_len, response_buf);
+ response_buf[RESP_BUF_LEN - 1] = '\0';
if (strlen(response_buf)) {
verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
__func__, __LINE__, received, resp_len, response_buf);
@@ -3574,7 +3583,7 @@ static int postProceSql(char *host, uint16_t port,
"%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
__func__, __LINE__, received, resp_len, response_buf);
break;
- }
+ }
}
} while(received < resp_len);
@@ -4380,7 +4389,7 @@ static int createSuperTable(
superTbl->lenOfTagOfOneRow = lenOfTagOfOneRow;
-
+
snprintf(command, BUFFER_SIZE,
superTbl->escapeChar ?
"CREATE TABLE IF NOT EXISTS %s.`%s` (ts TIMESTAMP%s) TAGS %s":
@@ -4515,7 +4524,7 @@ int createDatabasesAndStables(char *command) {
if (g_Dbs.db[i].superTbls[j].iface == SML_IFACE) {
goto skip;
}
-
+
sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName,
g_Dbs.db[i].superTbls[j].stbName);
ret = queryDbExec(taos, command, NO_INSERT_TYPE, true);
@@ -4575,7 +4584,7 @@ static void* createTable(void *sarg)
i <= pThreadInfo->end_table_to; i++) {
if (0 == g_Dbs.use_metric) {
snprintf(pThreadInfo->buffer, buff_len,
- g_args.escapeChar ?
+ g_args.escapeChar ?
"CREATE TABLE IF NOT EXISTS %s.`%s%"PRIu64"` %s;" :
"CREATE TABLE IF NOT EXISTS %s.%s%"PRIu64" %s;",
pThreadInfo->db_name,
@@ -6604,7 +6613,6 @@ static int getRowDataFromSample(
stbInfo->sampleDataBuf
+ stbInfo->lenOfOneRow * (*sampleUsePos));
}
-
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
(*sampleUsePos)++;
@@ -7139,7 +7147,7 @@ static void getTableName(char *pTblName,
if (stbInfo) {
if (AUTO_CREATE_SUBTBL != stbInfo->autoCreateTable) {
if (stbInfo->childTblLimit > 0) {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN,
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN,
stbInfo->escapeChar ? "`%s`" : "%s",
stbInfo->childTblName +
(tableSeq - stbInfo->childTblOffset) * TSDB_TABLE_NAME_LEN);
@@ -7152,12 +7160,12 @@ static void getTableName(char *pTblName,
stbInfo->childTblName + tableSeq * TSDB_TABLE_NAME_LEN);
}
} else {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN,
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN,
stbInfo->escapeChar ? "`%s%"PRIu64"`" : "%s%"PRIu64"",
stbInfo->childTblPrefix, tableSeq);
}
} else {
- snprintf(pTblName, TSDB_TABLE_NAME_LEN,
+ snprintf(pTblName, TSDB_TABLE_NAME_LEN,
g_args.escapeChar ? "`%s%"PRIu64"`" : "%s%"PRIu64"",
g_args.tb_prefix, tableSeq);
}
@@ -9713,7 +9721,7 @@ static void generateSmlHead(char* smlHead, SSuperTable* stbInfo, threadInfo* pTh
}
}
-static void generateSmlTail(char* line, char* smlHead, SSuperTable* stbInfo,
+static void generateSmlTail(char* line, char* smlHead, SSuperTable* stbInfo,
threadInfo* pThreadInfo, int64_t timestamp) {
int dataLen = 0;
dataLen = snprintf(line, BUFFER_SIZE, "%s ", smlHead);
@@ -9860,16 +9868,18 @@ static void* syncWriteInterlaceSml(threadInfo *pThreadInfo, uint32_t interlaceRo
} else {
batchPerTblTimes = 1;
}
-
- char *smlHead[pThreadInfo->ntables];
+
+ char **smlHeadList = calloc(pThreadInfo->ntables, sizeof(char *));
+ assert(smlHeadList);
for (int t = 0; t < pThreadInfo->ntables; t++) {
- smlHead[t] = (char *)calloc(HEAD_BUFF_LEN, 1);
- if (NULL == smlHead[t]) {
+ char* smlHead = *((char **)smlHeadList + t * sizeof(char *));
+ smlHead = (char *)calloc(HEAD_BUFF_LEN, 1);
+ if (NULL == smlHead) {
errorPrint2("calloc failed! size:%d\n", HEAD_BUFF_LEN);
exit(EXIT_FAILURE);
}
- generateSmlHead(smlHead[t], stbInfo, pThreadInfo, t);
-
+ generateSmlHead(smlHead, stbInfo, pThreadInfo, t);
+
}
pThreadInfo->totalInsertRows = 0;
@@ -9895,11 +9905,11 @@ static void* syncWriteInterlaceSml(threadInfo *pThreadInfo, uint32_t interlaceRo
pThreadInfo->lines = calloc(g_args.reqPerReq, sizeof(char *));
if (NULL == pThreadInfo->lines) {
errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n",
- g_args.reqPerReq * sizeof(char *),
+ g_args.reqPerReq * (uint64_t)sizeof(char *),
strerror(errno));
return NULL;
}
-
+
while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) {
if ((flagSleep) && (insert_interval)) {
st = taosGetTimestampMs();
@@ -9918,7 +9928,7 @@ static void* syncWriteInterlaceSml(threadInfo *pThreadInfo, uint32_t interlaceRo
errorPrint2("Failed to alloc %d bytes, reason:%s\n",
BUFFER_SIZE, strerror(errno));
}
- generateSmlTail(pThreadInfo->lines[j], smlHead[i], stbInfo, pThreadInfo, timestamp);
+ generateSmlTail(pThreadInfo->lines[j], *((char **)smlHeadList + i * sizeof(char *)), stbInfo, pThreadInfo, timestamp);
timestamp += timeStampStep;
}
tableSeq ++;
@@ -10037,8 +10047,9 @@ static void* syncWriteInterlaceSml(threadInfo *pThreadInfo, uint32_t interlaceRo
free_of_interlace:
tmfree(pThreadInfo->lines);
for (int index = 0; index < pThreadInfo->ntables; index++) {
- free(smlHead[index]);
+ tmfree(*(smlHeadList + index*(sizeof(char*))));
}
+ tmfree(smlHeadList);
printStatPerThread(pThreadInfo);
return NULL;
}
@@ -10462,15 +10473,17 @@ static void* syncWriteProgressiveSml(threadInfo *pThreadInfo) {
pThreadInfo->samplePos = 0;
- char *smlHead[pThreadInfo->ntables];
+ char *smlHeadList = calloc(pThreadInfo->ntables, sizeof(char *));
+ assert(smlHeadList);
for (int t = 0; t < pThreadInfo->ntables; t++) {
- smlHead[t] = (char *)calloc(HEAD_BUFF_LEN, 1);
- if (NULL == smlHead[t]) {
+ char* smlHead = *((char**)smlHeadList + t * sizeof(char *));
+ smlHead = (char *)calloc(HEAD_BUFF_LEN, 1);
+ if (NULL == smlHead) {
errorPrint2("calloc failed! size:%d\n", HEAD_BUFF_LEN);
exit(EXIT_FAILURE);
}
- generateSmlHead(smlHead[t], stbInfo, pThreadInfo, t);
-
+ generateSmlHead(smlHead, stbInfo, pThreadInfo, t);
+
}
int currentPercent = 0;
int percentComplete = 0;
@@ -10481,14 +10494,14 @@ static void* syncWriteProgressiveSml(threadInfo *pThreadInfo) {
pThreadInfo->lines = calloc(g_args.reqPerReq, sizeof(char *));
if (NULL == pThreadInfo->lines) {
errorPrint2("Failed to alloc %"PRIu64" bytes, reason:%s\n",
- g_args.reqPerReq * sizeof(char *),
+ g_args.reqPerReq * (uint64_t)sizeof(char *),
strerror(errno));
return NULL;
}
-
+
for (uint64_t i = 0; i < pThreadInfo->ntables; i++) {
int64_t timestamp = pThreadInfo->start_time;
-
+
for (uint64_t j = 0; j < insertRows;) {
for (int k = 0; k < g_args.reqPerReq; k++) {
pThreadInfo->lines[k] = calloc(BUFFER_SIZE, 1);
@@ -10496,7 +10509,7 @@ static void* syncWriteProgressiveSml(threadInfo *pThreadInfo) {
errorPrint2("Failed to alloc %d bytes, reason:%s\n",
BUFFER_SIZE, strerror(errno));
}
- generateSmlTail(pThreadInfo->lines[k], smlHead[i], stbInfo, pThreadInfo, timestamp);
+ generateSmlTail(pThreadInfo->lines[k], *((char**)smlHeadList + i * sizeof(char *)), stbInfo, pThreadInfo, timestamp);
timestamp += timeStampStep;
j++;
if (j == insertRows) {
@@ -10552,8 +10565,9 @@ static void* syncWriteProgressiveSml(threadInfo *pThreadInfo) {
}
tmfree(pThreadInfo->lines);
for (int index = 0; index < pThreadInfo->ntables; index++) {
- free(smlHead[index]);
+ free(*((char**)smlHeadList + index * sizeof(char *)));
}
+ tmfree(smlHeadList);
return NULL;
}
@@ -10955,8 +10969,8 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
int64_t ntables = 0;
- uint64_t tableFrom;
-
+ uint64_t tableFrom = 0;
+
if (stbInfo) {
if (stbInfo->iface != SML_IFACE) {
int64_t limit;
@@ -11198,7 +11212,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
}
*/
-
if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) {
#ifdef WINDOWS
WSADATA wsaData;
@@ -11223,7 +11236,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
}
pThreadInfo->sockfd = sockfd;
}
-
tsem_init(&(pThreadInfo->lock_sem), 0, 0);
if (ASYNC_MODE == g_Dbs.asyncMode) {
diff --git a/src/kit/taosdump/CMakeLists.txt b/src/kit/taosdump/CMakeLists.txt
index 5b48374e8f7d54bef4d199ff9398aaf6a74b257e..1daff0c75956072e02f8439acac2850b9315235a 100644
--- a/src/kit/taosdump/CMakeLists.txt
+++ b/src/kit/taosdump/CMakeLists.txt
@@ -3,6 +3,7 @@ PROJECT(TDengine)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
+INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(. SRC)
@@ -61,12 +62,22 @@ ENDIF ()
MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER})
ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}")
+LINK_DIRECTORIES(${CMAKE_BINARY_DIR}/build/lib ${CMAKE_BINARY_DIR}/build/lib64)
+
IF (TD_LINUX)
ADD_EXECUTABLE(taosdump ${SRC})
IF (TD_SOMODE_STATIC)
- TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
+ IF (TD_AVRO_SUPPORT)
+ TARGET_LINK_LIBRARIES(taosdump taos_static avro jansson)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdump taos_static)
+ ENDIF()
ELSE ()
- TARGET_LINK_LIBRARIES(taosdump taos cJson)
+ IF (TD_AVRO_SUPPORT)
+ TARGET_LINK_LIBRARIES(taosdump taos avro jansson)
+ ELSE ()
+ TARGET_LINK_LIBRARIES(taosdump taos)
+ ENDIF ()
ENDIF ()
ENDIF ()
@@ -74,8 +85,8 @@ IF (TD_DARWIN)
# missing for macosx
# ADD_EXECUTABLE(taosdump ${SRC})
# IF (TD_SOMODE_STATIC)
- # TARGET_LINK_LIBRARIES(taosdump taos_static cJson)
+ # TARGET_LINK_LIBRARIES(taosdump taos_static jansson)
# ELSE ()
- # TARGET_LINK_LIBRARIES(taosdump taos cJson)
+ # TARGET_LINK_LIBRARIES(taosdump taos jansson)
# ENDIF ()
ENDIF ()
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index 317722ada99392965ff07cb2921a6acb6b92ef01..d552e6123fd6d3e496006a0cb79f662d5c139cc1 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -28,15 +28,24 @@
#include "tsdb.h"
#include "tutil.h"
-#define AVRO_SUPPORT 0
-#if AVRO_SUPPORT == 1
+static char **g_tsDumpInSqlFiles = NULL;
+static char g_tsCharset[63] = {0};
+
+#ifdef AVRO_SUPPORT
#include <avro.h>
-#endif
+#include <jansson.h>
+
+static char **g_tsDumpInAvroFiles = NULL;
+
+static void print_json_aux(json_t *element, int indent);
+
+#endif /* AVRO_SUPPORT */
#define TSDB_SUPPORT_NANOSECOND 1
#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255
+#define MAX_PATH_LEN 4096 // max path length on linux is 4095
#define COMMAND_SIZE 65536
#define MAX_RECORDS_PER_REQ 32766
//#define DEFAULT_DUMP_FILE "taosdump.sql"
@@ -46,8 +55,6 @@
static int converStringToReadable(char *str, int size, char *buf, int bufsize);
static int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
-static void dumpCharset(FILE *fp);
-static void loadFileCharset(FILE *fp, char *fcharset);
typedef struct {
short bytes;
@@ -64,7 +71,12 @@ typedef struct {
#define performancePrint(fmt, ...) \
do { if (g_args.performance_print) \
- fprintf(stderr, "VERB: "fmt, __VA_ARGS__); } while(0)
+ fprintf(stderr, "PERF: "fmt, __VA_ARGS__); } while(0)
+
+#define warnPrint(fmt, ...) \
+ do { fprintf(stderr, "\033[33m"); \
+ fprintf(stderr, "WARN: "fmt, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m"); } while(0)
#define errorPrint(fmt, ...) \
do { fprintf(stderr, "\033[31m"); \
@@ -208,14 +220,13 @@ typedef struct {
typedef struct {
pthread_t threadID;
int32_t threadIndex;
- int32_t totalThreads;
char dbName[TSDB_DB_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
int precision;
TAOS *taos;
int64_t rowsOfDumpOut;
- int64_t tablesOfDumpOut;
- int64_t tableFrom;
+ int64_t count;
+ int64_t from;
} threadInfo;
typedef struct {
@@ -225,6 +236,44 @@ typedef struct {
int32_t totalDatabasesOfDumpOut;
} resultStatistics;
+#ifdef AVRO_SUPPORT
+
+enum enAvro_Codec {
+ AVRO_CODEC_START = 0,
+ AVRO_CODEC_NULL = AVRO_CODEC_START,
+ AVRO_CODEC_DEFLATE,
+ AVRO_CODEC_SNAPPY,
+ AVRO_CODEC_LZMA,
+ AVRO_CODEC_UNKNOWN = 255
+};
+
+char *g_avro_codec[] = {
+ "null",
+ "deflate",
+ "snappy",
+ "lzma",
+ "unknown"
+};
+
+/* avro section begin */
+#define RECORD_NAME_LEN 64
+#define FIELD_NAME_LEN 64
+#define TYPE_NAME_LEN 16
+
+typedef struct FieldStruct_S {
+ char name[FIELD_NAME_LEN];
+ char type[TYPE_NAME_LEN];
+} FieldStruct;
+
+typedef struct RecordSchema_S {
+ char name[RECORD_NAME_LEN];
+ char *fields;
+ int num_fields;
+} RecordSchema;
+
+/* avro section end */
+#endif
+
static int64_t g_totalDumpOutRows = 0;
SDbInfo **g_dbInfos = NULL;
@@ -276,14 +325,17 @@ static struct argp_option options[] = {
// dump format options
{"schemaonly", 's', 0, 0, "Only dump schema.", 2},
{"without-property", 'N', 0, 0, "Dump schema without properties.", 2},
- {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 2},
- {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 4},
- {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 5},
- {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 3},
- {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 3},
- {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 3},
- {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 3},
- {"debug", 'g', 0, 0, "Print debug info.", 8},
+#ifdef AVRO_SUPPORT
+ {"avro", 'v', 0, 0, "Dump apache avro format data file. By default, dump sql command sequence.", 3},
+ {"avro-codec", 'd', "snappy", 0, "Choose an avro codec among null, deflate, snappy, and lzma.", 4},
+#endif
+ {"start-time", 'S', "START_TIME", 0, "Start time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00:000+0800 or '2017-10-01 00:00:00.000+0800'", 8},
+ {"end-time", 'E', "END_TIME", 0, "End time to dump. Either epoch or ISO8601/RFC3339 format is acceptable. ISO8601 format example: 2017-10-01T00:00:00.000+0800 or 2017-10-0100:00:00.000+0800 or '2017-10-01 00:00:00.000+0800'", 9},
+ {"data-batch", 'B', "DATA_BATCH", 0, "Number of data point per insert statement. Max value is 32766. Default is 1.", 10},
+ {"max-sql-len", 'L', "SQL_LEN", 0, "Max length of one sql. Default is 65480.", 10},
+ {"table-batch", 't', "TABLE_BATCH", 0, "Number of table dumpout into one output file. Default is 1.", 10},
+ {"thread_num", 'T', "THREAD_NUM", 0, "Number of thread for dump in file. Default is 5.", 10},
+ {"debug", 'g', 0, 0, "Print debug info.", 15},
{0}
};
@@ -310,7 +362,10 @@ typedef struct arguments {
// dump format option
bool schemaonly;
bool with_property;
+#ifdef AVRO_SUPPORT
bool avro;
+ int avro_codec;
+#endif
int64_t start_time;
char humanStartTime[HUMAN_TIME_LEN];
int64_t end_time;
@@ -342,22 +397,6 @@ static resultStatistics g_resultStatistics = {0};
static FILE *g_fpOfResult = NULL;
static int g_numOfCores = 1;
-static int dumpOut();
-static int dumpIn();
-static void dumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty,
- FILE *fp);
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
- FILE *fp, char* dbName);
-static int getTableDes(
- char* dbName, char *table,
- TableDef *stableDes, bool isSuperTable);
-static int64_t dumpTableData(FILE *fp, char *tbName,
- char* dbName,
- int precision,
- char *jsonAvroSchema);
-static int checkParam();
-static void freeDbInfos();
-
struct arguments g_args = {
// connection option
NULL,
@@ -381,7 +420,10 @@ struct arguments g_args = {
// dump format option
false, // schemaonly
true, // with_property
- false, // avro format
+#ifdef AVRO_SUPPORT
+ false, // avro
+ AVRO_CODEC_SNAPPY, // avro_codec
+#endif
-INT64_MAX + 1, // start_time
{0}, // humanStartTime
INT64_MAX, // end_time
@@ -392,7 +434,7 @@ struct arguments g_args = {
1, // table_batch
false, // allow_sys
// other options
- 5, // thread_num
+ 8, // thread_num
0, // abort
NULL, // arg_list
0, // arg_list_len
@@ -542,6 +584,21 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
}
break;
+#ifdef AVRO_SUPPORT
+ case 'v':
+ g_args.avro = true;
+ break;
+
+ case 'd':
+ for (int i = AVRO_CODEC_START; i < AVRO_CODEC_UNKNOWN; i ++) {
+ if (0 == strcmp(arg, g_avro_codec[i])) {
+ g_args.avro_codec = i;
+ break;
+ }
+ }
+ break;
+#endif
+
case 'r':
g_args.resultFile = arg;
break;
@@ -573,9 +630,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'N':
g_args.with_property = false;
break;
- case 'v':
- g_args.avro = true;
- break;
case 'S':
// parse time here.
break;
@@ -612,8 +666,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
g_args.abort = 1;
break;
case ARGP_KEY_ARG:
- g_args.arg_list = &state->argv[state->next - 1];
- g_args.arg_list_len = state->argc - state->next + 1;
+ if (strlen(state->argv[state->next - 1])) {
+ g_args.arg_list = &state->argv[state->next - 1];
+ g_args.arg_list_len = state->argc - state->next + 1;
+ }
state->next = state->argc;
break;
@@ -1011,8 +1067,8 @@ static void dumpCreateMTableClause(
for (; counter < numOfCols; counter++) {
if (counter != count_temp) {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, ", \'%s\'",
@@ -1024,8 +1080,8 @@ static void dumpCreateMTableClause(
pstr += sprintf(pstr, ", \'%s\'", tableDes->cols[counter].value);
}
} else {
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
//pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].note);
if (tableDes->cols[counter].var_value) {
pstr += sprintf(pstr, "\'%s\'", tableDes->cols[counter].var_value);
@@ -1050,1902 +1106,2784 @@ static void dumpCreateMTableClause(
free(tmpBuf);
}
-static int convertTbDesToAvroSchema(
- char *dbName, char *tbName, TableDef *tableDes, int colCount,
- char **avroSchema)
+static int64_t getNtbCountOfStb(char *dbName, char *stbName)
{
- errorPrint("%s() LN%d TODO: covert table schema to avro schema\n",
- __func__, __LINE__);
- // {
- // "namesapce": "database name",
- // "type": "record",
- // "name": "table name",
- // "fields": [
- // {
- // "name": "col0 name",
- // "type": "long"
- // },
- // {
- // "name": "col1 name",
- // "type": ["int", "null"]
- // },
- // {
- // "name": "col2 name",
- // "type": ["float", "null"]
- // },
- // ...
- // {
- // "name": "coln name",
- // "type": ["string", "null"]
- // }
- // ]
- // }
- *avroSchema = (char *)calloc(1,
- 17 + TSDB_DB_NAME_LEN /* dbname section */
- + 17 /* type: record */
- + 11 + TSDB_TABLE_NAME_LEN /* tbname section */
- + 10 /* fields section */
- + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
- if (*avroSchema == NULL) {
- errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
+ TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ dbName, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
return -1;
}
- char *pstr = *avroSchema;
- pstr += sprintf(pstr,
- "{\"namespace\": \"%s\", \"type\": \"record\", \"name\": \"%s\", \"fields\": [",
- dbName, tbName);
- for (int i = 0; i < colCount; i ++) {
- if (0 == i) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": \"%s\"",
- tableDes->cols[i].field, "long");
- } else {
- if (strcasecmp(tableDes->cols[i].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]",
- tableDes->cols[i].field, "string");
- } else {
- pstr += sprintf(pstr,
- "{\"name\": \"%s\", \"type\": [\"%s\", \"null\"]",
- tableDes->cols[i].field, tableDes->cols[i].type);
- }
- }
- if ((i != (colCount -1))
- && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
- pstr += sprintf(pstr, "},");
- } else {
- pstr += sprintf(pstr, "}");
- break;
- }
+ int64_t count = 0;
+
+ char command[COMMAND_SIZE];
+
+ sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
+
+ TAOS_RES *res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
}
- pstr += sprintf(pstr, "]}");
+ TAOS_ROW row = NULL;
- debugPrint("%s() LN%d, avroSchema: %s\n", __func__, __LINE__, *avroSchema);
+ if ((row = taos_fetch_row(res)) != NULL) {
+ count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
+ }
- return 0;
+ taos_close(taos);
+ return count;
}
-static int64_t dumpNormalTable(
- char *dbName,
- char *stable,
- char *tbName,
- int precision,
- FILE *fp
- ) {
+static int getTableDes(
+ TAOS *taos,
+ char* dbName, char *table,
+ TableDef *tableDes, bool isSuperTable) {
+ TAOS_ROW row = NULL;
+ TAOS_RES* res = NULL;
int colCount = 0;
- TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
- + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
- colCount = getTableDes(dbName, tbName, tableDes, false);
-
- if (colCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- tbName);
- free(tableDes);
- return -1;
- }
+ char sqlstr[COMMAND_SIZE];
+ sprintf(sqlstr, "describe %s.%s;", dbName, table);
- // create child-table using super-table
- dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
- } else { // dump table definition
- colCount = getTableDes(dbName, tbName, tableDes, false);
+ res = taos_query(taos, sqlstr);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ return -1;
+ }
- if (colCount < 0) {
- errorPrint("%s() LN%d, failed to get table[%s] schema\n",
- __func__,
- __LINE__,
- tbName);
- free(tableDes);
- return -1;
- }
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- // create normal-table or super-table
- dumpCreateTableClause(tableDes, colCount, fp, dbName);
+ tstrncpy(tableDes->name, table, TSDB_TABLE_NAME_LEN);
+ while ((row = taos_fetch_row(res)) != NULL) {
+ tstrncpy(tableDes->cols[colCount].field,
+ (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX],
+ min(TSDB_COL_NAME_LEN,
+ fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes + 1));
+ tstrncpy(tableDes->cols[colCount].type,
+ (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX],
+ min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes + 1));
+ tableDes->cols[colCount].length =
+ *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]);
+ tstrncpy(tableDes->cols[colCount].note,
+ (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX],
+ min(COL_NOTE_LEN,
+ fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes + 1));
+ colCount++;
}
- char *jsonAvroSchema = NULL;
- if (g_args.avro) {
- if (0 != convertTbDesToAvroSchema(
- dbName, tbName, tableDes, colCount, &jsonAvroSchema)) {
- errorPrint("%s() LN%d, convertTbDesToAvroSchema failed\n",
- __func__,
- __LINE__);
- freeTbDes(tableDes);
- return -1;
- }
- }
+ taos_free_result(res);
+ res = NULL;
- int64_t ret = 0;
- if (!g_args.schemaonly) {
- ret = dumpTableData(fp, tbName, dbName, precision,
- jsonAvroSchema);
+ if (isSuperTable) {
+ return colCount;
}
- tfree(jsonAvroSchema);
- freeTbDes(tableDes);
- return ret;
-}
+ // if child-table have tag, using select tagName from table to get tagValue
+ for (int i = 0 ; i < colCount; i++) {
+ if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue;
-static int64_t dumpNormalTableBelongStb(
- SDbInfo *dbInfo, char *stbName, char *ntbName)
-{
- int64_t count = 0;
+ sprintf(sqlstr, "select %s from %s.%s",
+ tableDes->cols[i].field, dbName, table);
- char tmpBuf[4096] = {0};
- FILE *fp = NULL;
+ res = taos_query(taos, sqlstr);
+ code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
+ }
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.sql",
- g_args.outpath, dbInfo->name, ntbName);
- } else {
- sprintf(tmpBuf, "%s.%s.sql",
- dbInfo->name, ntbName);
- }
+ fields = taos_fetch_fields(res);
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
- }
+ row = taos_fetch_row(res);
+ if (NULL == row) {
+ errorPrint("%s() LN%d, fetch failed to run command <%s>, reason:%s\n",
+ __func__, __LINE__, sqlstr, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ return -1;
+ }
- count = dumpNormalTable(
- dbInfo->name,
- stbName,
- ntbName,
- getPrecisionByString(dbInfo->precision),
- fp);
+ if (row[TSDB_SHOW_TABLES_NAME_INDEX] == NULL) {
+ sprintf(tableDes->cols[i].note, "%s", "NUL");
+ sprintf(tableDes->cols[i].value, "%s", "NULL");
+ taos_free_result(res);
+ res = NULL;
+ continue;
+ }
- fclose(fp);
- return count;
-}
+ int32_t* length = taos_fetch_lengths(res);
-static int64_t dumpNormalTableWithoutStb(SDbInfo *dbInfo, char *ntbName)
-{
- int64_t count = 0;
-
- char tmpBuf[4096] = {0};
- FILE *fp = NULL;
+ switch (fields[0].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ sprintf(tableDes->cols[i].value, "%d",
+ ((((int32_t)(*((char *)
+ row[TSDB_SHOW_TABLES_NAME_INDEX])))==1)
+ ?1:0));
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ sprintf(tableDes->cols[i].value, "%d",
+ *((int8_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ sprintf(tableDes->cols[i].value, "%d",
+ *((int16_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_INT:
+ sprintf(tableDes->cols[i].value, "%d",
+ *((int32_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ sprintf(tableDes->cols[i].value, "%" PRId64 "",
+ *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ sprintf(tableDes->cols[i].value, "%f",
+ GET_FLOAT_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ sprintf(tableDes->cols[i].value, "%f",
+ GET_DOUBLE_VAL(row[TSDB_SHOW_TABLES_NAME_INDEX]));
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ memset(tableDes->cols[i].value, 0,
+ sizeof(tableDes->cols[i].value));
+ int len = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ // FIXME for long value
+ if (len < (COL_VALUEBUF_LEN - 2)) {
+ converStringToReadable(
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ length[0],
+ tableDes->cols[i].value,
+ len);
+ } else {
+ tableDes->cols[i].var_value = calloc(1, len * 2);
+ if (tableDes->cols[i].var_value == NULL) {
+ errorPrint("%s() LN%d, memory alalocation failed!\n",
+ __func__, __LINE__);
+ taos_free_result(res);
+ return -1;
+ }
+ converStringToReadable((char *)row[0],
+ length[0],
+ (char *)(tableDes->cols[i].var_value), len);
+ }
+ break;
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.sql",
- g_args.outpath, dbInfo->name, ntbName);
- } else {
- sprintf(tmpBuf, "%s.%s.sql",
- dbInfo->name, ntbName);
- }
+ case TSDB_DATA_TYPE_NCHAR:
+ memset(tableDes->cols[i].value, 0,
+ sizeof(tableDes->cols[i].note));
+ int nlen = strlen((char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ if (nlen < (COL_VALUEBUF_LEN-2)) {
+ char tbuf[COL_VALUEBUF_LEN-2]; // need reserve 2 bytes for ' '
+ convertNCharToReadable(
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ length[0], tbuf, COL_VALUEBUF_LEN-2);
+ sprintf(tableDes->cols[i].value, "%s", tbuf);
+ } else {
+ tableDes->cols[i].var_value = calloc(1, nlen * 4);
+ if (tableDes->cols[i].var_value == NULL) {
+ errorPrint("%s() LN%d, memory alalocation failed!\n",
+ __func__, __LINE__);
+ taos_free_result(res);
+ return -1;
+ }
+ converStringToReadable(
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ length[0],
+ (char *)(tableDes->cols[i].var_value), nlen);
+ }
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ sprintf(tableDes->cols[i].value, "%" PRId64 "",
+ *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+#if 0
+ if (!g_args.mysqlFlag) {
+ sprintf(tableDes->cols[i].value, "%" PRId64 "",
+ *(int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ sprintf(tableDes->cols[i].value, "\'%s.%03d\'", buf,
+ (int)(ts % 1000));
+ }
+#endif
+ break;
+ default:
+ break;
+ }
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
+ taos_free_result(res);
}
- count = dumpNormalTable(
- dbInfo->name,
- NULL,
- ntbName,
- getPrecisionByString(dbInfo->precision),
- fp);
-
- fclose(fp);
- return count;
+ return colCount;
}
-static void *dumpNtbOfDb(void *arg) {
- threadInfo *pThreadInfo = (threadInfo *)arg;
-
- debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom);
- debugPrint("dump table count = \t%"PRId64"\n",
- pThreadInfo->tablesOfDumpOut);
+static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
+ FILE *fp, char* dbName) {
+ int counter = 0;
+ int count_temp = 0;
+ char sqlstr[COMMAND_SIZE];
- FILE *fp = NULL;
- char tmpBuf[4096] = {0};
+ char* pstr = sqlstr;
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%d.sql",
- g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.%d.sql",
- pThreadInfo->dbName, pThreadInfo->threadIndex);
- }
+ pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
+ dbName, tableDes->name);
- fp = fopen(tmpBuf, "w");
+ for (; counter < numOfCols; counter++) {
+ if (tableDes->cols[counter].note[0] != '\0') break;
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return NULL;
- }
+ if (counter == 0) {
+ pstr += sprintf(pstr, " (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ } else {
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ }
- int64_t count;
- for (int64_t i = 0; i < pThreadInfo->tablesOfDumpOut; i++) {
- debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
- pThreadInfo->threadIndex, i,
- ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name);
- count = dumpNormalTable(
- pThreadInfo->dbName,
- ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->stable,
- ((TableInfo *)(g_tablesList + pThreadInfo->tableFrom+i))->name,
- pThreadInfo->precision,
- fp);
- if (count < 0) {
- break;
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
+ pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
}
}
- fclose(fp);
- return NULL;
-}
-
-static void *dumpNormalTablesOfStb(void *arg) {
- threadInfo *pThreadInfo = (threadInfo *)arg;
-
- debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->tableFrom);
- debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->tablesOfDumpOut);
-
- char command[COMMAND_SIZE];
+ count_temp = counter;
- sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
- pThreadInfo->dbName, pThreadInfo->stbName,
- pThreadInfo->tablesOfDumpOut, pThreadInfo->tableFrom);
+ for (; counter < numOfCols; counter++) {
+ if (counter == count_temp) {
+ pstr += sprintf(pstr, ") TAGS (%s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ } else {
+ pstr += sprintf(pstr, ", %s %s",
+ tableDes->cols[counter].field, tableDes->cols[counter].type);
+ }
- TAOS_RES *res = taos_query(pThreadInfo->taos, command);
- int32_t code = taos_errno(res);
- if (code) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- return NULL;
+ if (0 == strcasecmp(tableDes->cols[counter].type, "binary")
+ || 0 == strcasecmp(tableDes->cols[counter].type, "nchar")) {
+ pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
+ }
}
- FILE *fp = NULL;
- char tmpBuf[4096] = {0};
+ pstr += sprintf(pstr, ");");
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/%s.%s.%d.sql",
- g_args.outpath,
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- pThreadInfo->threadIndex);
- } else {
- sprintf(tmpBuf, "%s.%s.%d.sql",
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- pThreadInfo->threadIndex);
- }
+ debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
+ return fprintf(fp, "%s\n\n", sqlstr);
+}
- fp = fopen(tmpBuf, "w");
+static int dumpStableClasuse(TAOS *taos, SDbInfo *dbInfo, char *stbName, FILE *fp)
+{
+ uint64_t sizeOfTableDes =
+ (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return NULL;
+ TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
+ if (NULL == tableDes) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, sizeOfTableDes);
+ exit(-1);
}
- TAOS_ROW row = NULL;
- int64_t i = 0;
- int64_t count;
- while((row = taos_fetch_row(res)) != NULL) {
- debugPrint("[%d] sub table %"PRId64": name: %s\n",
- pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ int colCount = getTableDes(taos, dbInfo->name,
+ stbName, tableDes, true);
- count = dumpNormalTable(
- pThreadInfo->dbName,
- pThreadInfo->stbName,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
- pThreadInfo->precision,
- fp);
- if (count < 0) {
- break;
- }
+ if (colCount < 0) {
+ free(tableDes);
+ errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
+ __func__, __LINE__, stbName);
+ exit(-1);
}
- fclose(fp);
- return NULL;
+ dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
+ free(tableDes);
+
+ return 0;
}
-static int64_t dumpNtbOfDbByThreads(
- SDbInfo *dbInfo,
- int64_t ntbCount)
+static int64_t dumpCreateSTableClauseOfDb(
+ SDbInfo *dbInfo, FILE *fp)
{
- if (ntbCount <= 0) {
+ TAOS *taos = taos_connect(g_args.host,
+ g_args.user, g_args.password, dbInfo->name, g_args.port);
+ if (NULL == taos) {
+ errorPrint(
+ "Failed to connect to TDengine server %s by specified database %s\n",
+ g_args.host, dbInfo->name);
return 0;
}
- int threads = g_args.thread_num;
+ TAOS_ROW row;
+ char command[COMMAND_SIZE] = {0};
- int64_t a = ntbCount / threads;
- if (a < 1) {
- threads = ntbCount;
- a = 1;
- }
-
- assert(threads);
- int64_t b = ntbCount % threads;
-
- threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- assert(pids);
- assert(infos);
+ sprintf(command, "SHOW %s.STABLES", dbInfo->name);
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->taos = taos_connect(
- g_args.host,
- g_args.user,
- g_args.password,
- dbInfo->name,
- g_args.port
- );
- if (NULL == pThreadInfo->taos) {
- errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
- __func__,
- __LINE__,
- taos_errstr(NULL));
- free(pids);
- free(infos);
+ TAOS_RES* res = taos_query(taos, command);
+ int32_t code = taos_errno(res);
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ taos_close(taos);
+ exit(-1);
+ }
- return -1;
+ int64_t superTblCnt = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ if (0 == dumpStableClasuse(taos, dbInfo,
+ row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
+ superTblCnt ++;
}
-
- pThreadInfo->threadIndex = i;
- pThreadInfo->tablesOfDumpOut = (itableFrom = (i==0)?0:
- ((threadInfo *)(infos + i - 1))->tableFrom +
- ((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
- strcpy(pThreadInfo->dbName, dbInfo->name);
- pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-
- pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
}
- for (int64_t i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
+ taos_free_result(res);
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- taos_close(pThreadInfo->taos);
- }
+ fprintf(g_fpOfResult,
+ "# super table counter: %"PRId64"\n",
+ superTblCnt);
+ g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
- free(pids);
- free(infos);
+ taos_close(taos);
- return 0;
+ return superTblCnt;
}
-static int64_t getNtbCountOfStb(char *dbName, char *stbName)
-{
- TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
- dbName, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- return -1;
- }
+static void dumpCreateDbClause(
+ SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
+ char sqlstr[TSDB_MAX_SQL_LEN] = {0};
- int64_t count = 0;
+ char *pstr = sqlstr;
+ pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
+ if (isDumpProperty) {
+ pstr += sprintf(pstr,
+ "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
+ dbInfo->replica, dbInfo->quorum, dbInfo->days,
+ dbInfo->keeplist,
+ dbInfo->cache,
+ dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
+ dbInfo->fsync,
+ dbInfo->cachelast,
+ dbInfo->comp, dbInfo->precision, dbInfo->update);
+ }
- char command[COMMAND_SIZE];
+ pstr += sprintf(pstr, ";");
+ fprintf(fp, "%s\n\n", sqlstr);
+}
- sprintf(command, "SELECT COUNT(TBNAME) FROM %s.%s", dbName, stbName);
+static FILE* openDumpInFile(char *fptr) {
+ wordexp_t full_path;
- TAOS_RES *res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
+ if (wordexp(fptr, &full_path, 0) != 0) {
+ errorPrint("illegal file name: %s\n", fptr);
+ return NULL;
}
- TAOS_ROW row = NULL;
+ char *fname = full_path.we_wordv[0];
- if ((row = taos_fetch_row(res)) != NULL) {
- count = *(int64_t*)row[TSDB_SHOW_TABLES_NAME_INDEX];
+ FILE *f = NULL;
+ if ((fname) && (strlen(fname) > 0)) {
+ f = fopen(fname, "r");
+ if (f == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, fname);
+ }
}
- taos_close(taos);
- return count;
+ wordfree(&full_path);
+ return f;
}
-static int64_t dumpNtbOfStbByThreads(
- SDbInfo *dbInfo, char *stbName)
+static uint64_t getFilesNum(char *ext)
{
- int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
-
- if (ntbCount <= 0) {
- return 0;
- }
-
- int threads = g_args.thread_num;
+ uint64_t count = 0;
- int64_t a = ntbCount / threads;
- if (a < 1) {
- threads = ntbCount;
- a = 1;
- }
+ int namelen, extlen;
+ struct dirent *pDirent;
+ DIR *pDir;
- assert(threads);
- int64_t b = ntbCount % threads;
+ extlen = strlen(ext);
- pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
- threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
- assert(pids);
- assert(infos);
+ bool isSql = (0 == strcmp(ext, "sql"));
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- pThreadInfo->taos = taos_connect(
- g_args.host,
- g_args.user,
- g_args.password,
- dbInfo->name,
- g_args.port
- );
- if (NULL == pThreadInfo->taos) {
- errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
- __func__,
- __LINE__,
- taos_errstr(NULL));
- free(pids);
- free(infos);
+ pDir = opendir(g_args.inpath);
+ if (pDir != NULL) {
+ while ((pDirent = readdir(pDir)) != NULL) {
+ namelen = strlen (pDirent->d_name);
- return -1;
+ if (namelen > extlen) {
+ if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
+ if (isSql) {
+ if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
+ continue;
+ }
+ }
+ verbosePrint("%s found\n", pDirent->d_name);
+ count ++;
+ }
+ }
}
-
- pThreadInfo->threadIndex = i;
- pThreadInfo->tablesOfDumpOut = (itableFrom = (i==0)?0:
- ((threadInfo *)(infos + i - 1))->tableFrom +
- ((threadInfo *)(infos + i - 1))->tablesOfDumpOut;
- strcpy(pThreadInfo->dbName, dbInfo->name);
- pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
-
- strcpy(pThreadInfo->stbName, stbName);
- pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
- }
-
- for (int64_t i = 0; i < threads; i++) {
- pthread_join(pids[i], NULL);
- }
-
- int64_t records = 0;
- for (int64_t i = 0; i < threads; i++) {
- threadInfo *pThreadInfo = infos + i;
- records += pThreadInfo->rowsOfDumpOut;
- taos_close(pThreadInfo->taos);
+ closedir (pDir);
}
- free(pids);
- free(infos);
-
- return records;
+ debugPrint("%"PRId64" .%s files found!\n", count, ext);
+ return count;
}
-static int dumpStableClasuse(SDbInfo *dbInfo, char *stbName, FILE *fp)
+static void freeFileList(char **fileList, int64_t count)
{
- uint64_t sizeOfTableDes =
- (uint64_t)(sizeof(TableDef) + sizeof(ColDes) * TSDB_MAX_COLUMNS);
-
- TableDef *tableDes = (TableDef *)calloc(1, sizeOfTableDes);
- if (NULL == tableDes) {
- errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
- __func__, __LINE__, sizeOfTableDes);
- exit(-1);
- }
-
- int colCount = getTableDes(dbInfo->name,
- stbName, tableDes, true);
-
- if (colCount < 0) {
- free(tableDes);
- errorPrint("%s() LN%d, failed to get stable[%s] schema\n",
- __func__, __LINE__, stbName);
- exit(-1);
+ for (int64_t i = 0; i < count; i++) {
+ tfree(fileList[i]);
}
-
- dumpCreateTableClause(tableDes, colCount, fp, dbInfo->name);
- free(tableDes);
-
- return 0;
+ tfree(fileList);
}
-static int64_t dumpCreateSTableClauseOfDb(
- SDbInfo *dbInfo, FILE *fp)
+static void createDumpinList(char *ext, int64_t count)
{
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbInfo->name, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbInfo->name);
- return 0;
- }
-
- TAOS_ROW row;
- char command[COMMAND_SIZE] = {0};
+ bool isSql = (0 == strcmp(ext, "sql"));
- sprintf(command, "SHOW %s.STABLES", dbInfo->name);
-
- TAOS_RES* res = taos_query(taos, command);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- exit(-1);
- }
+ if (isSql) {
+ g_tsDumpInSqlFiles = (char **)calloc(count, sizeof(char *));
+ assert(g_tsDumpInSqlFiles);
- int64_t superTblCnt = 0;
- while ((row = taos_fetch_row(res)) != NULL) {
- if (0 == dumpStableClasuse(dbInfo, row[TSDB_SHOW_TABLES_NAME_INDEX], fp)) {
- superTblCnt ++;
+ for (int64_t i = 0; i < count; i++) {
+ g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ assert(g_tsDumpInSqlFiles[i]);
}
}
+#ifdef AVRO_SUPPORT
+ else {
+ g_tsDumpInAvroFiles = (char **)calloc(count, sizeof(char *));
+ assert(g_tsDumpInAvroFiles);
- taos_free_result(res);
+ for (int64_t i = 0; i < count; i++) {
+ g_tsDumpInAvroFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ assert(g_tsDumpInAvroFiles[i]);
+ }
- fprintf(g_fpOfResult,
- "# super table counter: %"PRId64"\n",
- superTblCnt);
- g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt;
+ }
+#endif
- taos_close(taos);
+ int namelen, extlen;
+ struct dirent *pDirent;
+ DIR *pDir;
+
+ extlen = strlen(ext);
+
+ count = 0;
+ pDir = opendir(g_args.inpath);
+ if (pDir != NULL) {
+ while ((pDirent = readdir(pDir)) != NULL) {
+ namelen = strlen (pDirent->d_name);
+
+ if (namelen > extlen) {
+ if (strcmp (ext, &(pDirent->d_name[namelen - extlen])) == 0) {
+ verbosePrint("%s found\n", pDirent->d_name);
+ if (isSql) {
+ if (0 == strcmp(pDirent->d_name, "dbs.sql")) {
+ continue;
+ }
+ strncpy(g_tsDumpInSqlFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
+ }
+#ifdef AVRO_SUPPORT
+ else {
+ strncpy(g_tsDumpInAvroFiles[count++], pDirent->d_name, MAX_FILE_NAME_LEN);
+ }
+#endif
+ }
+ }
+ }
+ closedir (pDir);
+ }
- return superTblCnt;
+ debugPrint("%"PRId64" .%s files filled to list!\n", count, ext);
}
-static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
+#ifdef AVRO_SUPPORT
+
+static int convertTbDesToJson(
+ char *dbName, char *tbName, TableDef *tableDes, int colCount,
+ char **jsonSchema)
{
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbInfo->name, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbInfo->name);
- return 0;
+ // {
+ // "type": "record",
+ // "name": "dbname.tbname",
+ // "fields": [
+ // {
+ // "name": "col0 name",
+ // "type": "long"
+ // },
+ // {
+ // "name": "col1 name",
+ // "type": "int"
+ // },
+ // {
+ // "name": "col2 name",
+ // "type": "float"
+ // },
+ // {
+ // "name": "col3 name",
+ // "type": "boolean"
+ // },
+ // ...
+ // {
+ // "name": "coln name",
+ // "type": "string"
+ // }
+ // ]
+ // }
+ *jsonSchema = (char *)calloc(1,
+ 17 + TSDB_DB_NAME_LEN /* dbname section */
+ + 17 /* type: record */
+ + 11 + TSDB_TABLE_NAME_LEN /* tbname section */
+ + 10 /* fields section */
+ + (TSDB_COL_NAME_LEN + 11 + 16) * colCount + 4); /* fields section */
+ if (*jsonSchema == NULL) {
+ errorPrint("%s() LN%d, memory allocation failed!\n", __func__, __LINE__);
+ return -1;
}
- char command[COMMAND_SIZE];
- TAOS_RES *result;
- int32_t code;
+ char *pstr = *jsonSchema;
+ pstr += sprintf(pstr,
+ "{\"type\": \"record\", \"name\": \"%s.%s\", \"fields\": [",
+ dbName, tbName);
+ for (int i = 0; i < colCount; i ++) {
+ if (0 == i) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "long");
+ } else {
+ if (strcasecmp(tableDes->cols[i].type, "binary") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "string");
+ } else if (strcasecmp(tableDes->cols[i].type, "nchar") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "bytes");
+ } else if (strcasecmp(tableDes->cols[i].type, "bool") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "boolean");
+ } else if (strcasecmp(tableDes->cols[i].type, "tinyint") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "int");
+ } else if (strcasecmp(tableDes->cols[i].type, "smallint") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "int");
+ } else if (strcasecmp(tableDes->cols[i].type, "bigint") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "long");
+ } else if (strcasecmp(tableDes->cols[i].type, "timestamp") == 0) {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field, "long");
+ } else {
+ pstr += sprintf(pstr,
+ "{\"name\": \"%s\", \"type\": \"%s\"",
+ tableDes->cols[i].field,
+ strtolower(tableDes->cols[i].type, tableDes->cols[i].type));
+ }
+ }
+ if ((i != (colCount -1))
+ && (strcmp(tableDes->cols[i + 1].note, "TAG") != 0)) {
+ pstr += sprintf(pstr, "},");
+ } else {
+ pstr += sprintf(pstr, "}");
+ break;
+ }
+ }
- sprintf(command, "USE %s", dbInfo->name);
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- errorPrint("invalid database %s, reason: %s\n",
- dbInfo->name, taos_errstr(result));
- taos_close(taos);
- return 0;
+ pstr += sprintf(pstr, "]}");
+
+ debugPrint("%s() LN%d, jsonSchema:\n %s\n", __func__, __LINE__, *jsonSchema);
+
+ return 0;
+}
+
+static void print_json_indent(int indent) {
+ int i;
+ for (i = 0; i < indent; i++) {
+ putchar(' ');
}
+}
- sprintf(command, "SHOW TABLES");
- result = taos_query(taos, command);
- code = taos_errno(result);
- if (code != 0) {
- errorPrint("Failed to show %s\'s tables, reason: %s\n",
- dbInfo->name, taos_errstr(result));
- taos_close(taos);
- return 0;
+const char *json_plural(size_t count) { return count == 1 ? "" : "s"; }
+
+static void print_json_object(json_t *element, int indent) {
+ size_t size;
+ const char *key;
+ json_t *value;
+
+ print_json_indent(indent);
+ size = json_object_size(element);
+
+ printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size));
+ json_object_foreach(element, key, value) {
+ print_json_indent(indent + 2);
+ printf("JSON Key: \"%s\"\n", key);
+ print_json_aux(value, indent + 2);
}
+}
- g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
+static void print_json_array(json_t *element, int indent) {
+ size_t i;
+ size_t size = json_array_size(element);
+ print_json_indent(indent);
- TAOS_ROW row;
- int64_t count = 0;
- while(NULL != (row = taos_fetch_row(result))) {
- debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
- __func__, __LINE__,
- count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- tstrncpy(((TableInfo *)(g_tablesList + count))->name,
- (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
- char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
- if (stbName) {
- tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
- (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
- ((TableInfo *)(g_tablesList + count))->belongStb = true;
- }
- count ++;
+ printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size));
+ for (i = 0; i < size; i++) {
+ print_json_aux(json_array_get(element, i), indent + 2);
}
- taos_close(taos);
+}
- int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
+static void print_json_string(json_t *element, int indent) {
+ print_json_indent(indent);
+ printf("JSON String: \"%s\"\n", json_string_value(element));
+}
- free(g_tablesList);
- g_tablesList = NULL;
+static void print_json_integer(json_t *element, int indent) {
+ print_json_indent(indent);
+ printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element));
+}
- return records;
+static void print_json_real(json_t *element, int indent) {
+ print_json_indent(indent);
+ printf("JSON Real: %f\n", json_real_value(element));
}
-static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
+static void print_json_true(json_t *element, int indent) {
+ (void)element;
+ print_json_indent(indent);
+ printf("JSON True\n");
+}
+
+static void print_json_false(json_t *element, int indent) {
+ (void)element;
+ print_json_indent(indent);
+ printf("JSON False\n");
+}
+
+static void print_json_null(json_t *element, int indent) {
+ (void)element;
+ print_json_indent(indent);
+ printf("JSON Null\n");
+}
+
+static void print_json_aux(json_t *element, int indent)
{
- dumpCreateDbClause(dbInfo, g_args.with_property, fp);
+ switch(json_typeof(element)) {
+ case JSON_OBJECT:
+ print_json_object(element, indent);
+ break;
- fprintf(g_fpOfResult, "\n#### database: %s\n",
- dbInfo->name);
- g_resultStatistics.totalDatabasesOfDumpOut++;
+ case JSON_ARRAY:
+ print_json_array(element, indent);
+ break;
- dumpCreateSTableClauseOfDb(dbInfo, fp);
+ case JSON_STRING:
+ print_json_string(element, indent);
+ break;
- return dumpNTablesOfDb(dbInfo);
-}
+ case JSON_INTEGER:
+ print_json_integer(element, indent);
+ break;
-static int dumpOut() {
- TAOS *taos = NULL;
- TAOS_RES *result = NULL;
+ case JSON_REAL:
+ print_json_real(element, indent);
+ break;
- TAOS_ROW row;
- FILE *fp = NULL;
- int32_t count = 0;
+ case JSON_TRUE:
+ print_json_true(element, indent);
+ break;
- char tmpBuf[4096] = {0};
- if (g_args.outpath[0] != 0) {
- sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
- } else {
- sprintf(tmpBuf, "dbs.sql");
- }
+ case JSON_FALSE:
+ print_json_false(element, indent);
+ break;
- fp = fopen(tmpBuf, "w");
- if (fp == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, tmpBuf);
- return -1;
+ case JSON_NULL:
+ print_json_null(element, indent);
+ break;
+
+ default:
+ fprintf(stderr, "unrecongnized JSON type %d\n", json_typeof(element));
}
+}
- g_args.dumpDbCount = getDumpDbCount();
- debugPrint("%s() LN%d, dump db count: %d\n",
- __func__, __LINE__, g_args.dumpDbCount);
+static void print_json(json_t *root) { print_json_aux(root, 0); }
- if (0 == g_args.dumpDbCount) {
- errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
- fclose(fp);
- return -1;
+static json_t *load_json(char *jsonbuf)
+{
+ json_t *root;
+ json_error_t error;
+
+ root = json_loads(jsonbuf, 0, &error);
+
+ if (root) {
+ return root;
+ } else {
+ fprintf(stderr, "json error on line %d: %s\n", error.line, error.text);
+ return NULL;
}
+}
- g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
- if (g_dbInfos == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n",
+static RecordSchema *parse_json_to_recordschema(json_t *element)
+{
+ RecordSchema *recordSchema = malloc(sizeof(RecordSchema));
+ assert(recordSchema);
+
+ if (JSON_OBJECT != json_typeof(element)) {
+ fprintf(stderr, "%s() LN%d, json passed is not an object\n",
__func__, __LINE__);
- goto _exit_failure;
+ return NULL;
}
- char command[COMMAND_SIZE];
+ const char *key;
+ json_t *value;
+
+ json_object_foreach(element, key, value) {
+ if (0 == strcmp(key, "name")) {
+ tstrncpy(recordSchema->name, json_string_value(value), RECORD_NAME_LEN-1);
+ } else if (0 == strcmp(key, "fields")) {
+ if (JSON_ARRAY == json_typeof(value)) {
+
+ size_t i;
+ size_t size = json_array_size(value);
+
+ verbosePrint("%s() LN%d, JSON Array of %lld element%s:\n",
+ __func__, __LINE__,
+ (long long)size, json_plural(size));
+
+ recordSchema->num_fields = size;
+ recordSchema->fields = malloc(sizeof(FieldStruct) * size);
+ assert(recordSchema->fields);
+
+ for (i = 0; i < size; i++) {
+ FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);
+ json_t *arr_element = json_array_get(value, i);
+ const char *ele_key;
+ json_t *ele_value;
+
+ json_object_foreach(arr_element, ele_key, ele_value) {
+ if (0 == strcmp(ele_key, "name")) {
+ tstrncpy(field->name, json_string_value(ele_value), FIELD_NAME_LEN-1);
+ } else if (0 == strcmp(ele_key, "type")) {
+ if (JSON_STRING == json_typeof(ele_value)) {
+ tstrncpy(field->type, json_string_value(ele_value), TYPE_NAME_LEN-1);
+ } else if (JSON_OBJECT == json_typeof(ele_value)) {
+ const char *obj_key;
+ json_t *obj_value;
+
+ json_object_foreach(ele_value, obj_key, obj_value) {
+ if (0 == strcmp(obj_key, "type")) {
+ if (JSON_STRING == json_typeof(obj_value)) {
+ tstrncpy(field->type,
+ json_string_value(obj_value), TYPE_NAME_LEN-1);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ } else {
+ fprintf(stderr, "%s() LN%d, fields have no array\n",
+ __func__, __LINE__);
+ return NULL;
+ }
- /* Connect to server */
- taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- goto _exit_failure;
+ break;
+ }
}
- /* --------------------------------- Main Code -------------------------------- */
- /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
- /* */
- dumpCharset(fp);
+ return recordSchema;
+}
- sprintf(command, "show databases");
- result = taos_query(taos, command);
- int32_t code = taos_errno(result);
+static void freeRecordSchema(RecordSchema *recordSchema)
+{
+ if (recordSchema) {
+ if (recordSchema->fields) {
+ free(recordSchema->fields);
+ }
+ free(recordSchema);
+ }
+}
- if (code != 0) {
- errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
- __func__, __LINE__, command, taos_errstr(result));
- goto _exit_failure;
+static int64_t writeResultToAvro(
+ char *avroFilename,
+ char *jsonSchema,
+ TAOS_RES *res)
+{
+ avro_schema_t schema;
+ if (avro_schema_from_json_length(jsonSchema, strlen(jsonSchema), &schema)) {
+ errorPrint("%s() LN%d, Unable to parse:\n%s \nto schema\nerror message: %s\n",
+ __func__, __LINE__, jsonSchema, avro_strerror());
+ exit(EXIT_FAILURE);
}
- TAOS_FIELD *fields = taos_fetch_fields(result);
+ json_t *json_root = load_json(jsonSchema);
+ debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
- while ((row = taos_fetch_row(result)) != NULL) {
- // sys database name : 'log', but subsequent version changed to 'log'
- if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
- && (!g_args.allow_sys)) {
- continue;
+ RecordSchema *recordSchema;
+ if (json_root) {
+ if (g_args.debug_print || g_args.verbose_print) {
+ print_json(json_root);
}
- if (g_args.databases) { // input multi dbs
- if (inDatabasesSeq(
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
- continue;
- }
- } else if (!g_args.all_databases) { // only input one db
- if (strncasecmp(g_args.arg_list[0],
- (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
- continue;
+ recordSchema = parse_json_to_recordschema(json_root);
+ if (NULL == recordSchema) {
+ fprintf(stderr, "Failed to parse json to recordschema\n");
+ exit(EXIT_FAILURE);
}
- g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
- if (g_dbInfos[count] == NULL) {
- errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
- __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
- goto _exit_failure;
- }
+ json_decref(json_root);
+ } else {
+ errorPrint("json:\n%s\n can't be parsed by jansson\n", jsonSchema);
+ exit(EXIT_FAILURE);
+ }
- okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
- tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
- min(TSDB_DB_NAME_LEN,
- fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
- if (g_args.with_property) {
- g_dbInfos[count]->ntables =
- *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
- g_dbInfos[count]->vgroups =
- *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
- g_dbInfos[count]->replica =
- *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
- g_dbInfos[count]->quorum =
- *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
- g_dbInfos[count]->days =
- *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
+ avro_file_writer_t db;
- tstrncpy(g_dbInfos[count]->keeplist,
- (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
- min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
- //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
- //g_dbInfos[count]->daysToKeep1;
- //g_dbInfos[count]->daysToKeep2;
- g_dbInfos[count]->cache =
- *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
- g_dbInfos[count]->blocks =
- *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
- g_dbInfos[count]->minrows =
- *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
- g_dbInfos[count]->maxrows =
- *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
- g_dbInfos[count]->wallevel =
- *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
- g_dbInfos[count]->fsync =
- *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
- g_dbInfos[count]->comp =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
- g_dbInfos[count]->cachelast =
- (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
+ int rval = avro_file_writer_create_with_codec
+ (avroFilename, schema, &db, g_avro_codec[g_args.avro_codec], 0);
+ if (rval) {
+ errorPrint("There was an error creating %s. reason: %s\n",
+ avroFilename, avro_strerror());
+ exit(EXIT_FAILURE);
+ }
- tstrncpy(g_dbInfos[count]->precision,
- (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
- DB_PRECISION_LEN);
- g_dbInfos[count]->update =
- *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
- }
- count++;
+ TAOS_ROW row = NULL;
- if (g_args.databases) {
- if (count > g_args.dumpDbCount)
- break;
- } else if (!g_args.all_databases) {
- if (count >= 1)
- break;
- }
- }
+ int numFields = taos_field_count(res);
+ assert(numFields > 0);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
- if (count == 0) {
- errorPrint("%d databases valid to dump\n", count);
- goto _exit_failure;
- }
+ avro_value_iface_t *wface =
+ avro_generic_class_from_schema(schema);
- taos_close(taos);
+ avro_value_t record;
+ avro_generic_value_new(wface, &record);
- if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases
- for (int i = 0; i < count; i++) {
- int64_t records = 0;
- records = dumpWholeDatabase(g_dbInfos[i], fp);
- if (records >= 0) {
- okPrint("Database %s dumped\n", g_dbInfos[i]->name);
- g_totalDumpOutRows += records;
+ int64_t count = 0;
+ while ((row = taos_fetch_row(res)) != NULL) {
+ avro_value_t value;
+
+ for (int col = 0; col < numFields; col++) {
+ if (0 != avro_value_get_by_name(
+ &record, fields[col].name, &value, NULL)) {
+ errorPrint("%s() LN%d, avro_value_get_by_name(..%s..) failed",
+ __func__, __LINE__, fields[col].name);
+ continue;
}
- }
- } else {
- if (1 == g_args.arg_list_len) {
- int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
- if (records >= 0) {
- okPrint("Database %s dumped\n", g_dbInfos[0]->name);
- g_totalDumpOutRows += records;
+
+ int len;
+ switch (fields[col].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ if (NULL == row[col]) {
+ avro_value_set_int(&value, TSDB_DATA_BOOL_NULL);
+ } else {
+ avro_value_set_boolean(&value,
+ ((((int32_t)(*((char *)row[col])))==1)?1:0));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ if (NULL == row[col]) {
+ avro_value_set_int(&value, TSDB_DATA_TINYINT_NULL);
+ } else {
+ avro_value_set_int(&value, *((int8_t *)row[col]));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ if (NULL == row[col]) {
+ avro_value_set_int(&value, TSDB_DATA_SMALLINT_NULL);
+ } else {
+ avro_value_set_int(&value, *((int16_t *)row[col]));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ if (NULL == row[col]) {
+ avro_value_set_int(&value, TSDB_DATA_INT_NULL);
+ } else {
+ avro_value_set_int(&value, *((int32_t *)row[col]));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ if (NULL == row[col]) {
+ avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL);
+ } else {
+ avro_value_set_long(&value, *((int64_t *)row[col]));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ if (NULL == row[col]) {
+ avro_value_set_float(&value, TSDB_DATA_FLOAT_NULL);
+ } else {
+ avro_value_set_float(&value, GET_FLOAT_VAL(row[col]));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ if (NULL == row[col]) {
+ avro_value_set_double(&value, TSDB_DATA_DOUBLE_NULL);
+ } else {
+ avro_value_set_double(&value, GET_DOUBLE_VAL(row[col]));
+ }
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ if (NULL == row[col]) {
+ avro_value_set_string(&value,
+ (char *)NULL);
+ } else {
+ avro_value_set_string(&value, (char *)row[col]);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_NCHAR:
+ if (NULL == row[col]) {
+ avro_value_set_bytes(&value,
+ (void*)NULL,0);
+ } else {
+ len = strlen((char*)row[col]);
+ avro_value_set_bytes(&value, (void*)(row[col]),len);
+ }
+ break;
+
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (NULL == row[col]) {
+ avro_value_set_long(&value, TSDB_DATA_BIGINT_NULL);
+ } else {
+ avro_value_set_long(&value, *((int64_t *)row[col]));
+ }
+ break;
+
+ default:
+ break;
}
+ }
+
+ if (0 != avro_file_writer_append_value(db, &record)) {
+ errorPrint("%s() LN%d, Unable to write record to file. Message: %s\n",
+ __func__, __LINE__,
+ avro_strerror());
} else {
- dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+ count ++;
}
+ }
- int superTblCnt = 0 ;
- for (int i = 1; g_args.arg_list[i]; i++) {
- TableRecordInfo tableRecordInfo;
+ avro_value_decref(&record);
+ avro_value_iface_decref(wface);
+ freeRecordSchema(recordSchema);
+ avro_file_writer_close(db);
+ avro_schema_decref(schema);
- if (getTableRecordInfo(g_dbInfos[0]->name,
- g_args.arg_list[i],
- &tableRecordInfo) < 0) {
- errorPrint("input the invalid table %s\n",
- g_args.arg_list[i]);
- continue;
- }
+ return count;
+}
- int64_t records = 0;
- if (tableRecordInfo.isStb) { // dump all table of this stable
- int ret = dumpStableClasuse(
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- fp);
- if (ret >= 0) {
- superTblCnt++;
- records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
- }
- } else if (tableRecordInfo.belongStb){
- dumpStableClasuse(
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- fp);
- records = dumpNormalTableBelongStb(
- g_dbInfos[0],
- tableRecordInfo.tableRecord.stable,
- g_args.arg_list[i]);
- } else {
- records = dumpNormalTableWithoutStb(g_dbInfos[0], g_args.arg_list[i]);
- }
+void freeBindArray(char *bindArray, int onlyCol)
+{
+ TAOS_BIND *bind;
- if (records >= 0) {
- okPrint("table: %s dumped\n", g_args.arg_list[i]);
- g_totalDumpOutRows += records;
- }
+ for (int j = 0; j < onlyCol; j++) {
+ bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * j));
+ if ((TSDB_DATA_TYPE_BINARY != bind->buffer_type)
+ && (TSDB_DATA_TYPE_NCHAR != bind->buffer_type)) {
+ tfree(bind->buffer);
}
}
+}
- /* Close the handle and return */
- fclose(fp);
- taos_free_result(result);
- freeDbInfos();
- fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
- return 0;
/*
 * Restore all rows contained in one avro data file into TDengine.
 *
 * Parameters:
 *   fcharset     - character-set name; not referenced in this body.
 *   encode       - encoding name; not referenced in this body.
 *   avroFilepath - path of the .avro file to read.
 *
 * Flow: open an avro reader -> serialize the file's writer schema to
 * JSON -> parse that JSON into a RecordSchema -> connect using the
 * schema namespace as the database name -> fetch the real table layout
 * from the server -> prepare one parameterized INSERT statement ->
 * bind every avro record into a batch -> execute the batch once.
 *
 * Returns the number of rows successfully bound/batched (>= 0), or a
 * negative value on open/parse/prepare failure or when rows failed to
 * bind (negative count of failures).
 */
static int dumpInOneAvroFile(char* fcharset,
        char* encode, char *avroFilepath)
{
    debugPrint("avroFilepath: %s\n", avroFilepath);

    avro_file_reader_t reader;

    if(avro_file_reader(avroFilepath, &reader)) {
        fprintf(stderr, "Unable to open avro file %s: %s\n",
                avroFilepath, avro_strerror());
        return -1;
    }

    // Worst-case size of the schema rendered as JSON: one name plus
    // type/punctuation overhead per possible column (11 + 16 headroom).
    int buf_len = TSDB_MAX_COLUMNS * (TSDB_COL_NAME_LEN + 11 + 16) + 4;
    char *jsonbuf = calloc(1, buf_len);
    assert(jsonbuf);

    avro_writer_t jsonwriter = avro_writer_memory(jsonbuf, buf_len);;

    // The "writer schema" is the schema the file was created with.
    avro_schema_t schema;
    schema = avro_file_reader_get_writer_schema(reader);
    avro_schema_to_json(schema, jsonwriter);

    // An empty buffer means avro_schema_to_json produced nothing.
    if (0 == strlen(jsonbuf)) {
        errorPrint("Failed to parse avro file: %s schema. reason: %s\n",
                avroFilepath, avro_strerror());
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        // NOTE(review): jsonbuf is not freed on this or the following
        // early-return paths (it is only freed on the success path).
        return -1;
    }
    debugPrint("Schema:\n %s\n", jsonbuf);

    json_t *json_root = load_json(jsonbuf);
    debugPrint("\n%s() LN%d\n *** Schema parsed:\n", __func__, __LINE__);
    if (g_args.debug_print) {
        print_json(json_root);
    }

    // The avro namespace carries the originating database name.
    const char *namespace = avro_schema_namespace((const avro_schema_t)schema);
    debugPrint("Namespace: %s\n", namespace);

    TAOS *taos = taos_connect(g_args.host, g_args.user, g_args.password,
            namespace, g_args.port);
    if (taos == NULL) {
        errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
        return -1;
    }

    TAOS_STMT *stmt = taos_stmt_init(taos);
    if (NULL == stmt) {
        taos_close(taos);
        errorPrint("%s() LN%d, stmt init failed! reason: %s\n",
                __func__, __LINE__, taos_errstr(NULL));
        return -1;
    }

    RecordSchema *recordSchema = parse_json_to_recordschema(json_root);
    if (NULL == recordSchema) {
        errorPrint("Failed to parse json to recordschema. reason: %s\n",
                avro_strerror());
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }
    json_decref(json_root);

    // Ask the server for the actual table layout; the record name is
    // used as the table name.
    TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
            + sizeof(ColDes) * TSDB_MAX_COLUMNS);

    int allColCount = getTableDes(taos, (char *)namespace, recordSchema->name, tableDes, false);

    if (allColCount < 0) {
        errorPrint("%s() LN%d, failed to get table[%s] schema\n",
                __func__,
                __LINE__,
                recordSchema->name);
        free(tableDes);
        freeRecordSchema(recordSchema);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }

    // Build "INSERT INTO ? VALUES(?,?,...)" with one placeholder per
    // non-TAG column (TAG columns are not inserted through stmt).
    char *stmtBuffer = calloc(1, TSDB_MAX_ALLOWED_SQL_LEN);
    assert(stmtBuffer);
    char *pstr = stmtBuffer;
    pstr += sprintf(pstr, "INSERT INTO ? VALUES(?");

    int onlyCol = 1; // at least timestamp
    for (int col = 1; col < allColCount; col++) {
        if (strcmp(tableDes->cols[col].note, "TAG") == 0) continue;
        pstr += sprintf(pstr, ",?");
        onlyCol ++;
    }
    pstr += sprintf(pstr, ")");

    if (0 != taos_stmt_prepare(stmt, stmtBuffer, 0)) {
        errorPrint("Failed to execute taos_stmt_prepare(). reason: %s\n",
                taos_stmt_errstr(stmt));

        free(stmtBuffer);
        free(tableDes);
        freeRecordSchema(recordSchema);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }

    if (0 != taos_stmt_set_tbname(stmt, recordSchema->name)) {
        errorPrint("Failed to execute taos_stmt_set_tbname(%s). reason: %s\n",
                recordSchema->name, taos_stmt_errstr(stmt));

        // NOTE(review): unlike the branch above, recordSchema is not
        // freed on this path.
        free(stmtBuffer);
        free(tableDes);
        avro_schema_decref(schema);
        avro_file_reader_close(reader);
        avro_writer_free(jsonwriter);
        return -1;
    }

    avro_value_iface_t *value_class = avro_generic_class_from_schema(schema);
    avro_value_t value;
    avro_generic_value_new(value_class, &value);

    // One TAOS_BIND per bound (non-TAG) column, reused for every record.
    char *bindArray =
            malloc(sizeof(TAOS_BIND) * onlyCol);
    assert(bindArray);

    int success = 0;
    int failed = 0;
    // Iterate records until the reader reports non-zero (EOF or error).
    while(!avro_file_reader_read_value(reader, &value)) {
        memset(bindArray, 0, sizeof(TAOS_BIND) * onlyCol);
        TAOS_BIND *bind;

        for (int i = 0; i < recordSchema->num_fields; i++) {
            bind = (TAOS_BIND *)((char *)bindArray + (sizeof(TAOS_BIND) * i));

            avro_value_t field_value;

            FieldStruct *field = (FieldStruct *)(recordSchema->fields + sizeof(FieldStruct) * i);

            bind->is_null = NULL;
            // NOTE(review): is_null is a loop-local whose address may be
            // stored in bind->is_null (float/double NULL case) but read
            // by taos_stmt_bind_param only after this scope ends —
            // lifetime looks unsafe; confirm.
            int is_null = 1;
            if (0 == i) {
                // Field 0 is always the timestamp column.
                int64_t *ts = malloc(sizeof(int64_t));
                assert(ts);

                avro_value_get_by_name(&value, field->name, &field_value, NULL);
                avro_value_get_long(&field_value, ts);

                bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
                bind->buffer_length = sizeof(int64_t);
                bind->buffer = ts;
                bind->length = &bind->buffer_length;
            } else if (0 == avro_value_get_by_name(
                        &value, field->name, &field_value, NULL)) {

                // Dispatch on the SERVER column type (tableDes), not the
                // avro type, since avro widened some types on dump-out.
                if (0 == strcasecmp(tableDes->cols[i].type, "int")) {
                    int32_t *n32 = malloc(sizeof(int32_t));
                    assert(n32);

                    avro_value_get_int(&field_value, n32);
                    debugPrint("%d | ", *n32);
                    bind->buffer_type = TSDB_DATA_TYPE_INT;
                    bind->buffer_length = sizeof(int32_t);
                    bind->buffer = n32;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "tinyint")) {
                    // tinyint was stored as an avro int; read 4 bytes,
                    // then bind only sizeof(int8_t).
                    // NOTE(review): reinterpreting the int32 buffer as
                    // int8 presumably relies on little-endian layout —
                    // confirm.
                    int32_t *n8 = malloc(sizeof(int32_t));
                    assert(n8);

                    avro_value_get_int(&field_value, n8);
                    debugPrint("%d | ", *n8);
                    bind->buffer_type = TSDB_DATA_TYPE_TINYINT;
                    bind->buffer_length = sizeof(int8_t);
                    bind->buffer = (int8_t *)n8;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "smallint")) {
                    // smallint: same widened-read pattern as tinyint.
                    int32_t *n16 = malloc(sizeof(int32_t));
                    assert(n16);

                    avro_value_get_int(&field_value, n16);
                    debugPrint("%d | ", *n16);
                    bind->buffer_type = TSDB_DATA_TYPE_SMALLINT;
                    bind->buffer_length = sizeof(int16_t);
                    bind->buffer = (int32_t*)n16;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "bigint")) {
                    int64_t *n64 = malloc(sizeof(int64_t));
                    assert(n64);

                    avro_value_get_long(&field_value, n64);
                    debugPrint("%"PRId64" | ", *n64);
                    bind->buffer_type = TSDB_DATA_TYPE_BIGINT;
                    bind->buffer_length = sizeof(int64_t);
                    bind->buffer = n64;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "timestamp")) {
                    int64_t *n64 = malloc(sizeof(int64_t));
                    assert(n64);

                    avro_value_get_long(&field_value, n64);
                    debugPrint("%"PRId64" | ", *n64);
                    bind->buffer_type = TSDB_DATA_TYPE_TIMESTAMP;
                    bind->buffer_length = sizeof(int64_t);
                    bind->buffer = n64;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "float")) {
                    float *f = malloc(sizeof(float));
                    assert(f);

                    avro_value_get_float(&field_value, f);
                    // The dump-out side wrote the NULL sentinel value
                    // for SQL NULL floats; map it back to is_null here.
                    if (TSDB_DATA_FLOAT_NULL == *f) {
                        debugPrint("%s | ", "NULL");
                        bind->is_null = &is_null;
                    } else {
                        debugPrint("%f | ", *f);
                        bind->buffer = f;
                    }
                    bind->buffer_type = TSDB_DATA_TYPE_FLOAT;
                    bind->buffer_length = sizeof(float);
                } else if (0 == strcasecmp(tableDes->cols[i].type, "double")) {
                    double *dbl = malloc(sizeof(double));
                    assert(dbl);

                    avro_value_get_double(&field_value, dbl);
                    if (TSDB_DATA_DOUBLE_NULL == *dbl) {
                        debugPrint("%s | ", "NULL");
                        bind->is_null = &is_null;
                    } else {
                        debugPrint("%f | ", *dbl);
                        bind->buffer = dbl;
                    }
                    // NOTE(review): this unconditional assignment makes
                    // the NULL branch above pointless for double —
                    // buffer is always set; confirm intent.
                    bind->buffer = dbl;
                    bind->buffer_type = TSDB_DATA_TYPE_DOUBLE;
                    bind->buffer_length = sizeof(double);
                } else if (0 == strcasecmp(tableDes->cols[i].type, "binary")) {
                    size_t size;

                    // buf points into avro-owned memory; freeBindArray
                    // intentionally skips freeing BINARY/NCHAR buffers.
                    char *buf = NULL;
                    avro_value_get_string(&field_value, (const char **)&buf, &size);
                    debugPrint("%s | ", (char *)buf);
                    bind->buffer_type = TSDB_DATA_TYPE_BINARY;
                    bind->buffer_length = tableDes->cols[i].length;
                    bind->buffer = buf;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "nchar")) {
                    size_t bytessize;
                    void *bytesbuf = NULL;

                    avro_value_get_bytes(&field_value, (const void **)&bytesbuf, &bytessize);
                    debugPrint("%s | ", (char*)bytesbuf);
                    bind->buffer_type = TSDB_DATA_TYPE_NCHAR;
                    bind->buffer_length = tableDes->cols[i].length;
                    bind->buffer = bytesbuf;
                } else if (0 == strcasecmp(tableDes->cols[i].type, "bool")) {
                    int32_t *bl = malloc(sizeof(int32_t));
                    assert(bl);

                    avro_value_get_boolean(&field_value, bl);
                    debugPrint("%s | ", (*bl)?"true":"false");
                    bind->buffer_type = TSDB_DATA_TYPE_BOOL;
                    bind->buffer_length = sizeof(int8_t);
                    bind->buffer = (int8_t*)bl;
                }

                bind->length = &bind->buffer_length;
            }
        }
        debugPrint("%s", "\n");

        if (0 != taos_stmt_bind_param(stmt, (TAOS_BIND *)bindArray)) {
            errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
                    __func__, __LINE__, taos_stmt_errstr(stmt));
            freeBindArray(bindArray, onlyCol);
            // failed counts downward so the final result can signal a
            // negative number of failed rows.
            failed --;
            continue;
        }
        if (0 != taos_stmt_add_batch(stmt)) {
            // NOTE(review): message says stmt_bind_param but this is the
            // add_batch failure path.
            errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
                    __func__, __LINE__, taos_stmt_errstr(stmt));
            freeBindArray(bindArray, onlyCol);
            failed --;
            continue;
        }
        freeBindArray(bindArray, onlyCol);
        success ++;
        continue;
    }

    // Execute the whole accumulated batch once after reading all rows.
    if (0 != taos_stmt_execute(stmt)) {
        // NOTE(review): message says stmt_bind_param but this is the
        // execute failure path; also 'failed = success' leaves failed
        // non-negative, so the failure is not reflected in the return
        // value below — confirm intent.
        errorPrint("%s() LN%d stmt_bind_param() failed! reason: %s\n",
                __func__, __LINE__, taos_stmt_errstr(stmt));
        failed = success;
    }
    avro_value_decref(&value);
    avro_value_iface_decref(value_class);

    tfree(bindArray);

    tfree(stmtBuffer);
    tfree(tableDes);

    freeRecordSchema(recordSchema);
    avro_schema_decref(schema);
    avro_file_reader_close(reader);
    avro_writer_free(jsonwriter);

    tfree(jsonbuf);

    taos_stmt_close(stmt);
    taos_close(taos);

    if (failed < 0)
        return failed;
    return success;
}
-static void dumpCreateDbClause(
- SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) {
- char sqlstr[TSDB_MAX_SQL_LEN] = {0};
+static void* dumpInAvroWorkThreadFp(void *arg)
+{
+ threadInfo *pThread = (threadInfo*)arg;
+ setThreadName("dumpInAvroWorkThrd");
+ verbosePrint("[%d] process %"PRId64" files from %"PRId64"\n",
+ pThread->threadIndex, pThread->count, pThread->from);
+
+ for (int64_t i = 0; i < pThread->count; i++) {
+ char avroFile[MAX_PATH_LEN];
+ sprintf(avroFile, "%s/%s", g_args.inpath,
+ g_tsDumpInAvroFiles[pThread->from + i]);
+
+ if (0 == dumpInOneAvroFile(g_tsCharset,
+ g_args.encode,
+ avroFile)) {
+ okPrint("[%d] Success dump in file: %s\n",
+ pThread->threadIndex, avroFile);
+ }
+ }
- char *pstr = sqlstr;
- pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name);
- if (isDumpProperty) {
- pstr += sprintf(pstr,
- "REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d FSYNC %d CACHELAST %d COMP %d PRECISION '%s' UPDATE %d",
- dbInfo->replica, dbInfo->quorum, dbInfo->days,
- dbInfo->keeplist,
- dbInfo->cache,
- dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows,
- dbInfo->fsync,
- dbInfo->cachelast,
- dbInfo->comp, dbInfo->precision, dbInfo->update);
+ return NULL;
+}
+
+static int64_t dumpInAvroWorkThreads()
+{
+ int64_t ret = 0;
+
+ int32_t threads = g_args.thread_num;
+
+ uint64_t avroFileCount = getFilesNum("avro");
+ if (0 == avroFileCount) {
+ debugPrint("No .avro file found in %s\n", g_args.inpath);
+ return 0;
}
- pstr += sprintf(pstr, ";");
- fprintf(fp, "%s\n\n", sqlstr);
+ createDumpinList("avro", avroFileCount);
+
+ threadInfo *pThread;
+
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = (threadInfo *)calloc(
+ threads, sizeof(threadInfo));
+ assert(pids);
+ assert(infos);
+
+ int64_t a = avroFileCount / threads;
+ if (a < 1) {
+ threads = avroFileCount;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = avroFileCount % threads;
+ }
+
+ int64_t from = 0;
+
+ for (int32_t t = 0; t < threads; ++t) {
+ pThread = infos + t;
+ pThread->threadIndex = t;
+
+ pThread->from = from;
+ pThread->count = tcount;
+ verbosePrint(
+ "Thread[%d] takes care avro files total %"PRId64" files from %"PRId64"\n",
+ t, pThread->count, pThread->from);
+
+ if (pthread_create(pids + t, NULL,
+ dumpInAvroWorkThreadFp, (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread[%d] failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ for (int t = 0; t < threads; ++t) {
+ pthread_join(pids[t], NULL);
+ }
+
+ free(infos);
+ free(pids);
+
+ freeFileList(g_tsDumpInAvroFiles, avroFileCount);
+
+ return ret;
}
-static int dumpCreateTableClause(TableDef *tableDes, int numOfCols,
- FILE *fp, char* dbName) {
- int counter = 0;
- int count_temp = 0;
- char sqlstr[COMMAND_SIZE];
+#endif /* AVRO_SUPPORT */
- char* pstr = sqlstr;
+static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
+{
+ int64_t totalRows = 0;
- pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s",
- dbName, tableDes->name);
+ int32_t sql_buf_len = g_args.max_sql_len;
+ char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
+ assert(tmpBuffer);
- for (; counter < numOfCols; counter++) {
- if (tableDes->cols[counter].note[0] != '\0') break;
+ char *pstr = tmpBuffer;
- if (counter == 0) {
- pstr += sprintf(pstr, " (%s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
+ TAOS_ROW row = NULL;
+ int rowFlag = 0;
+ int64_t lastRowsPrint = 5000000;
+ int count = 0;
+
+ int numFields = taos_field_count(res);
+ assert(numFields > 0);
+ TAOS_FIELD *fields = taos_fetch_fields(res);
+
+ int32_t curr_sqlstr_len = 0;
+ int32_t total_sqlstr_len = 0;
+
+ while ((row = taos_fetch_row(res)) != NULL) {
+ curr_sqlstr_len = 0;
+
+ int32_t* length = taos_fetch_lengths(res); // act len
+
+ if (count == 0) {
+ total_sqlstr_len = 0;
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "INSERT INTO %s.%s VALUES (", dbName, tbName);
} else {
- pstr += sprintf(pstr, ", %s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
+ if (g_args.mysqlFlag) {
+ if (0 == rowFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ rowFlag++;
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
+ }
+ } else {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
+ }
}
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
- }
+ for (int col = 0; col < numFields; col++) {
+ if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+
+ if (row[col] == NULL) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
+ continue;
+ }
+
+ switch (fields[col].type) {
+ case TSDB_DATA_TYPE_BOOL:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ ((((int32_t)(*((char *)row[col])))==1)?1:0));
+ break;
+
+ case TSDB_DATA_TYPE_TINYINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ *((int8_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_SMALLINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ *((int16_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_INT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
+ *((int32_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_BIGINT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "%" PRId64 "",
+ *((int64_t *)row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_FLOAT:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
+ GET_FLOAT_VAL(row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_DOUBLE:
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f",
+ GET_DOUBLE_VAL(row[col]));
+ break;
+
+ case TSDB_DATA_TYPE_BINARY:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ converStringToReadable((char *)row[col], length[col],
+ tbuf, COMMAND_SIZE);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_NCHAR:
+ {
+ char tbuf[COMMAND_SIZE] = {0};
+ convertNCharToReadable((char *)row[col], length[col],
+ tbuf, COMMAND_SIZE);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "\'%s\'", tbuf);
+ break;
+ }
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ if (!g_args.mysqlFlag) {
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "%" PRId64 "",
+ *(int64_t *)row[col]);
+ } else {
+ char buf[64] = "\0";
+ int64_t ts = *((int64_t *)row[col]);
+ time_t tt = (time_t)(ts / 1000);
+ struct tm *ptm = localtime(&tt);
+ strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
+ "\'%s.%03d\'",
+ buf, (int)(ts % 1000));
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+
+ totalRows++;
+ count++;
+ fprintf(fp, "%s", tmpBuffer);
+
+ if (totalRows >= lastRowsPrint) {
+ printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
+ totalRows, dbName, tbName);
+ lastRowsPrint += 5000000;
+ }
+
+ total_sqlstr_len += curr_sqlstr_len;
+
+ if ((count >= g_args.data_batch)
+ || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
+ fprintf(fp, ";\n");
+ count = 0;
+ }
+ }
+
+ debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
+
+ fprintf(fp, "\n");
+ free(tmpBuffer);
+
+ return totalRows;
+}
+
/*
 * Dump the data rows of one table within the configured time window.
 *
 * Parameters:
 *   fp         - destination file for SQL output (ignored in avro mode).
 *   tbName     - table to dump.
 *   dbName     - database the table belongs to.
 *   precision  - timestamp precision used to parse human-readable times.
 *   jsonSchema - avro schema JSON (used only when g_args.avro is set).
 *
 * Returns the number of dumped rows, or -1 on time-parse/connect/query
 * failure.
 */
static int64_t dumpTableData(FILE *fp, char *tbName,
        char* dbName, int precision,
        char *jsonSchema) {
    int64_t totalRows = 0;

    char sqlstr[1024] = {0};

    // Human-readable start/end times take priority over the numeric
    // g_args.start_time/end_time when provided.
    int64_t start_time, end_time;
    if (strlen(g_args.humanStartTime)) {
        if (TSDB_CODE_SUCCESS != taosParseTime(
                g_args.humanStartTime, &start_time,
                strlen(g_args.humanStartTime),
                precision, 0)) {
            errorPrint("Input %s, time format error!\n",
                    g_args.humanStartTime);
            return -1;
        }
    } else {
        start_time = g_args.start_time;
    }

    if (strlen(g_args.humanEndTime)) {
        if (TSDB_CODE_SUCCESS != taosParseTime(
                g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
                precision, 0)) {
            errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
            return -1;
        }
    } else {
        end_time = g_args.end_time;
    }

    // _c0 is the first (timestamp) column; rows come back time-ordered.
    sprintf(sqlstr,
            "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
            dbName, tbName, start_time, end_time);

    TAOS *taos = taos_connect(g_args.host,
            g_args.user, g_args.password, dbName, g_args.port);
    if (NULL == taos) {
        errorPrint(
                "Failed to connect to TDengine server %s by specified database %s\n",
                g_args.host, dbName);
        return -1;
    }

    TAOS_RES* res = taos_query(taos, sqlstr);
    int32_t code = taos_errno(res);
    if (code != 0) {
        errorPrint("failed to run command %s, reason: %s\n",
                sqlstr, taos_errstr(res));
        taos_free_result(res);
        taos_close(taos);
        return -1;
    }

#ifdef AVRO_SUPPORT
    // In avro mode write <db>.<table>.avro (under outpath if set);
    // otherwise fall through to plain SQL output. Note the `else`
    // directly before #endif deliberately binds to the statement below.
    if (g_args.avro) {
        char avroFilename[MAX_PATH_LEN] = {0};

        if (g_args.outpath[0] != 0) {
            sprintf(avroFilename, "%s/%s.%s.avro",
                    g_args.outpath, dbName, tbName);
        } else {
            sprintf(avroFilename, "%s.%s.avro",
                    dbName, tbName);
        }

        totalRows = writeResultToAvro(avroFilename, jsonSchema, res);
    } else
#endif
        totalRows = writeResultToSql(res, fp, dbName, tbName);

    taos_free_result(res);
    taos_close(taos);
    return totalRows;
}
+
+static int64_t dumpNormalTable(
+ TAOS *taos,
+ char *dbName,
+ char *stable,
+ char *tbName,
+ int precision,
+ FILE *fp
+ ) {
+ int colCount = 0;
+
+ TableDef *tableDes = (TableDef *)calloc(1, sizeof(TableDef)
+ + sizeof(ColDes) * TSDB_MAX_COLUMNS);
+
+ if (stable != NULL && stable[0] != '\0') { // dump table schema which is created by using super table
+ colCount = getTableDes(taos, dbName, tbName, tableDes, false);
+
+ if (colCount < 0) {
+ errorPrint("%s() LN%d, failed to get table[%s] schema\n",
+ __func__,
+ __LINE__,
+ tbName);
+ free(tableDes);
+ return -1;
+ }
+
+ // create child-table using super-table
+ dumpCreateMTableClause(dbName, stable, tableDes, colCount, fp);
+ } else { // dump table definition
+ colCount = getTableDes(taos, dbName, tbName, tableDes, false);
+
+ if (colCount < 0) {
+ errorPrint("%s() LN%d, failed to get table[%s] schema\n",
+ __func__,
+ __LINE__,
+ tbName);
+ free(tableDes);
+ return -1;
+ }
+
+ // create normal-table or super-table
+ dumpCreateTableClause(tableDes, colCount, fp, dbName);
+ }
+
+ char *jsonSchema = NULL;
+#ifdef AVRO_SUPPORT
+ if (g_args.avro) {
+ if (0 != convertTbDesToJson(
+ dbName, tbName, tableDes, colCount, &jsonSchema)) {
+ errorPrint("%s() LN%d, convertTbDesToJson failed\n",
+ __func__,
+ __LINE__);
+ freeTbDes(tableDes);
+ return -1;
+ }
+ }
+#endif
+
+ int64_t totalRows = 0;
+ if (!g_args.schemaonly) {
+ totalRows = dumpTableData(fp, tbName, dbName, precision,
+ jsonSchema);
+ }
+
+ tfree(jsonSchema);
+ freeTbDes(tableDes);
+ return totalRows;
+}
+
+static int64_t dumpNormalTableWithoutStb(TAOS *taos, SDbInfo *dbInfo, char *ntbName)
+{
+ int64_t count = 0;
+
+ char tmpBuf[MAX_PATH_LEN] = {0};
+ FILE *fp = NULL;
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%s.sql",
+ g_args.outpath, dbInfo->name, ntbName);
+ } else {
+ sprintf(tmpBuf, "%s.%s.sql",
+ dbInfo->name, ntbName);
+ }
+
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
+
+ count = dumpNormalTable(
+ taos,
+ dbInfo->name,
+ NULL,
+ ntbName,
+ getPrecisionByString(dbInfo->precision),
+ fp);
+ if (count > 0) {
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
+ }
+ fclose(fp);
+ return count;
+}
+
+static int64_t dumpNormalTableBelongStb(
+ TAOS *taos,
+ SDbInfo *dbInfo, char *stbName, char *ntbName)
+{
+ int64_t count = 0;
+
+ char tmpBuf[MAX_PATH_LEN] = {0};
+ FILE *fp = NULL;
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%s.sql",
+ g_args.outpath, dbInfo->name, ntbName);
+ } else {
+ sprintf(tmpBuf, "%s.%s.sql",
+ dbInfo->name, ntbName);
+ }
+
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
+
+ count = dumpNormalTable(
+ taos,
+ dbInfo->name,
+ stbName,
+ ntbName,
+ getPrecisionByString(dbInfo->precision),
+ fp);
+ if (count > 0) {
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
+ }
+
+ fclose(fp);
+ return count;
+}
+
+static void *dumpNtbOfDb(void *arg) {
+ threadInfo *pThreadInfo = (threadInfo *)arg;
+
+ debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
+ debugPrint("dump table count = \t%"PRId64"\n",
+ pThreadInfo->count);
+
+ FILE *fp = NULL;
+ char tmpBuf[MAX_PATH_LEN] = {0};
+
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%d.sql",
+ g_args.outpath, pThreadInfo->dbName, pThreadInfo->threadIndex);
+ } else {
+ sprintf(tmpBuf, "%s.%d.sql",
+ pThreadInfo->dbName, pThreadInfo->threadIndex);
+ }
+
+ fp = fopen(tmpBuf, "w");
+
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return NULL;
+ }
+
+ int64_t count;
+ for (int64_t i = 0; i < pThreadInfo->count; i++) {
+ debugPrint("[%d] No.\t%"PRId64" table name: %s\n",
+ pThreadInfo->threadIndex, i,
+ ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name);
+ count = dumpNormalTable(
+ pThreadInfo->taos,
+ pThreadInfo->dbName,
+ ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->stable,
+ ((TableInfo *)(g_tablesList + pThreadInfo->from+i))->name,
+ pThreadInfo->precision,
+ fp);
+ if (count < 0) {
+ break;
+ } else {
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
+ }
+ }
+
+ fclose(fp);
+ return NULL;
+}
+
+static int checkParam() {
+ if (g_args.all_databases && g_args.databases) {
+ errorPrint("%s", "conflict option --all-databases and --databases\n");
+ return -1;
+ }
+
+ if (g_args.start_time > g_args.end_time) {
+ errorPrint("%s", "start time is larger than end time\n");
+ return -1;
+ }
+
+ if (g_args.arg_list_len == 0) {
+ if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
+ errorPrint("%s", "taosdump requires parameters\n");
+ return -1;
+ }
+ }
+ /*
+ if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
+ fprintf(stderr, "duplicate parameter input and output file path\n");
+ return -1;
+ }
+ */
+ if (!g_args.isDumpIn && g_args.encode != NULL) {
+ fprintf(stderr, "invalid option in dump out\n");
+ return -1;
+ }
+
+ if (g_args.table_batch <= 0) {
+ fprintf(stderr, "invalid option in dump out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+static bool isEmptyCommand(char *cmd) {
+ char *pchar = cmd;
+
+ while (*pchar != '\0') {
+ if (*pchar != ' ') return false;
+ pchar++;
+ }
+
+ return true;
+}
+
+static void taosReplaceCtrlChar(char *str) {
+ bool ctrlOn = false;
+ char *pstr = NULL;
+
+ for (pstr = str; *str != '\0'; ++str) {
+ if (ctrlOn) {
+ switch (*str) {
+ case 'n':
+ *pstr = '\n';
+ pstr++;
+ break;
+ case 'r':
+ *pstr = '\r';
+ pstr++;
+ break;
+ case 't':
+ *pstr = '\t';
+ pstr++;
+ break;
+ case '\\':
+ *pstr = '\\';
+ pstr++;
+ break;
+ case '\'':
+ *pstr = '\'';
+ pstr++;
+ break;
+ default:
+ break;
+ }
+ ctrlOn = false;
+ } else {
+ if (*str == '\\') {
+ ctrlOn = true;
+ } else {
+ *pstr = *str;
+ pstr++;
+ }
+ }
+ }
+
+ *pstr = '\0';
+}
+*/
+
+char *ascii_literal_list[] = {
+ "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
+ "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
+ "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
+ "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
+ "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
+ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
+ "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
+ "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
+ "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
+ "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
+ "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
+ "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
+ "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
+ "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
+ "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
+ "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
+ "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
+ "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
+ "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
+ "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
+
+static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
+ pstr++;
+ size--;
}
+ *pbuf = '\0';
+ return 0;
+}
- count_temp = counter;
-
- for (; counter < numOfCols; counter++) {
- if (counter == count_temp) {
- pstr += sprintf(pstr, ") TAGS (%s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
- } else {
- pstr += sprintf(pstr, ", %s %s",
- tableDes->cols[counter].field, tableDes->cols[counter].type);
+static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
+ char *pstr = str;
+ char *pbuf = buf;
+ wchar_t wc;
+ while (size > 0) {
+ if (*pstr == '\0') break;
+ int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
+ if (byte_width < 0) {
+ errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
+ exit(-1);
}
- if (strcasecmp(tableDes->cols[counter].type, "binary") == 0 ||
- strcasecmp(tableDes->cols[counter].type, "nchar") == 0) {
- pstr += sprintf(pstr, "(%d)", tableDes->cols[counter].length);
+ if ((int)wc < 256) {
+ pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+ } else {
+ memcpy(pbuf, pstr, byte_width);
+ pbuf += byte_width;
}
+ pstr += byte_width;
}
- pstr += sprintf(pstr, ");");
-
- debugPrint("%s() LN%d, write string: %s\n", __func__, __LINE__, sqlstr);
- return fprintf(fp, "%s\n\n", sqlstr);
-}
+ *pbuf = '\0';
-static int writeSchemaToAvro(char *jsonAvroSchema)
-{
- errorPrint("%s() LN%d, TODO: implement write schema to avro",
- __func__, __LINE__);
return 0;
}
-static int64_t writeResultToAvro(TAOS_RES *res)
-{
- errorPrint("%s() LN%d, TODO: implementation need\n", __func__, __LINE__);
- return 0;
+static void dumpCharset(FILE *fp) {
+ char charsetline[256];
+
+ (void)fseek(fp, 0, SEEK_SET);
+ sprintf(charsetline, "#!%s\n", tsCharset);
+ (void)fwrite(charsetline, strlen(charsetline), 1, fp);
}
-static int64_t writeResultToSql(TAOS_RES *res, FILE *fp, char *dbName, char *tbName)
-{
- int64_t totalRows = 0;
+static void loadFileCharset(FILE *fp, char *fcharset) {
+ char * line = NULL;
+ size_t line_size = 0;
- int32_t sql_buf_len = g_args.max_sql_len;
- char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128);
- if (tmpBuffer == NULL) {
- errorPrint("failed to allocate %d memory\n", sql_buf_len + 128);
- return -1;
+ (void)fseek(fp, 0, SEEK_SET);
+ ssize_t size = getline(&line, &line_size, fp);
+ if (size <= 2) {
+ goto _exit_no_charset;
}
- char *pstr = tmpBuffer;
-
- TAOS_ROW row = NULL;
- int numFields = 0;
- int rowFlag = 0;
- int64_t lastRowsPrint = 5000000;
- int count = 0;
+ if (strncmp(line, "#!", 2) != 0) {
+ goto _exit_no_charset;
+ }
+ if (line[size - 1] == '\n') {
+ line[size - 1] = '\0';
+ size--;
+ }
+ strcpy(fcharset, line + 2);
- numFields = taos_field_count(res);
- assert(numFields > 0);
- TAOS_FIELD *fields = taos_fetch_fields(res);
+ tfree(line);
+ return;
- int32_t curr_sqlstr_len = 0;
- int32_t total_sqlstr_len = 0;
+_exit_no_charset:
+ (void)fseek(fp, 0, SEEK_SET);
+ *fcharset = '\0';
+ tfree(line);
+ return;
+}
- while ((row = taos_fetch_row(res)) != NULL) {
- curr_sqlstr_len = 0;
+// ======== dumpIn support multi threads functions ================================//
- int32_t* length = taos_fetch_lengths(res); // act len
+static int dumpInOneSqlFile(TAOS* taos, FILE* fp, char* fcharset,
+ char* encode, char* fileName) {
+ int read_len = 0;
+ char * cmd = NULL;
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
- if (count == 0) {
- total_sqlstr_len = 0;
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len,
- "INSERT INTO %s.%s VALUES (", dbName, tbName);
- } else {
- if (g_args.mysqlFlag) {
- if (0 == rowFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- rowFlag++;
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", (");
- }
- } else {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "(");
- }
- }
+ cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
+ if (cmd == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ return -1;
+ }
- for (int col = 0; col < numFields; col++) {
- if (col != 0) curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ", ");
+ int lastRowsPrint = 5000000;
+ int lineNo = 0;
+ while ((read_len = getline(&line, &line_len, fp)) != -1) {
+ ++lineNo;
+ if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
+ line[--read_len] = '\0';
- if (row[col] == NULL) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "NULL");
- continue;
- }
+ //if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ if (read_len == 0 ) {
+ continue;
+ }
- switch (fields[col].type) {
- case TSDB_DATA_TYPE_BOOL:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d",
- ((((int32_t)(*((char *)row[col]))) == 1) ? 1 : 0));
- break;
- case TSDB_DATA_TYPE_TINYINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int8_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int16_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_INT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%d", *((int32_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
- *((int64_t *)row[col]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_FLOAT_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col]));
- break;
- case TSDB_DATA_TYPE_BINARY:
- {
- char tbuf[COMMAND_SIZE] = {0};
- converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_NCHAR:
- {
- char tbuf[COMMAND_SIZE] = {0};
- convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf);
- break;
- }
- case TSDB_DATA_TYPE_TIMESTAMP:
- if (!g_args.mysqlFlag) {
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "",
- *(int64_t *)row[col]);
- } else {
- char buf[64] = "\0";
- int64_t ts = *((int64_t *)row[col]);
- time_t tt = (time_t)(ts / 1000);
- struct tm *ptm = localtime(&tt);
- strftime(buf, 64, "%y-%m-%d %H:%M:%S", ptm);
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "\'%s.%03d\'",
- buf, (int)(ts % 1000));
- }
- break;
- default:
- break;
- }
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
}
- curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ")");
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd[read_len + cmd_len]= '\0';
+ if (queryDbImpl(taos, cmd)) {
+ errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
+ __func__, __LINE__, lineNo, fileName);
+ fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
+ }
- totalRows++;
- count++;
- fprintf(fp, "%s", tmpBuffer);
+ memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
+ cmd_len = 0;
- if (totalRows >= lastRowsPrint) {
- printf(" %"PRId64 " rows already be dumpout from %s.%s\n",
- totalRows, dbName, tbName);
+ if (lineNo >= lastRowsPrint) {
+ printf(" %d lines already be executed from file %s\n", lineNo, fileName);
lastRowsPrint += 5000000;
}
+ }
- total_sqlstr_len += curr_sqlstr_len;
+ tfree(cmd);
+ tfree(line);
+ return 0;
+}
- if ((count >= g_args.data_batch)
- || (sql_buf_len - total_sqlstr_len < TSDB_MAX_BYTES_PER_ROW)) {
- fprintf(fp, ";\n");
- count = 0;
+static void* dumpInSqlWorkThreadFp(void *arg)
+{
+ threadInfo *pThread = (threadInfo*)arg;
+ setThreadName("dumpInSqlWorkThrd");
+ fprintf(stderr, "[%d] Start to process %"PRId64" files from %"PRId64"\n",
+ pThread->threadIndex, pThread->count, pThread->from);
+
+ for (int64_t i = 0; i < pThread->count; i++) {
+ char sqlFile[MAX_PATH_LEN];
+ sprintf(sqlFile, "%s/%s", g_args.inpath, g_tsDumpInSqlFiles[pThread->from + i]);
+
+ FILE* fp = openDumpInFile(sqlFile);
+ if (NULL == fp) {
+ errorPrint("[%d] Failed to open input file: %s\n",
+ pThread->threadIndex, sqlFile);
+ continue;
}
+
+ if (0 == dumpInOneSqlFile(pThread->taos, fp, g_tsCharset, g_args.encode,
+ sqlFile)) {
+ okPrint("[%d] Success dump in file: %s\n",
+ pThread->threadIndex, sqlFile);
+ }
+ fclose(fp);
}
- debugPrint("total_sqlstr_len: %d\n", total_sqlstr_len);
+ return NULL;
+}
- fprintf(fp, "\n");
- atomic_add_fetch_64(&g_totalDumpOutRows, totalRows);
- free(tmpBuffer);
+static int dumpInSqlWorkThreads()
+{
+ int32_t threads = g_args.thread_num;
- return 0;
-}
+ uint64_t sqlFileCount = getFilesNum("sql");
+ if (0 == sqlFileCount) {
+ debugPrint("No .sql file found in %s\n", g_args.inpath);
+ return 0;
+ }
-static int64_t dumpTableData(FILE *fp, char *tbName,
- char* dbName, int precision,
- char *jsonAvroSchema) {
- int64_t totalRows = 0;
+ createDumpinList("sql", sqlFileCount);
+
+ threadInfo *pThread;
+
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = (threadInfo *)calloc(
+ threads, sizeof(threadInfo));
+ assert(pids);
+ assert(infos);
+
+ int64_t a = sqlFileCount / threads;
+ if (a < 1) {
+ threads = sqlFileCount;
+ a = 1;
+ }
+
+ int64_t b = 0;
+ if (threads != 0) {
+ b = sqlFileCount % threads;
+ }
- char sqlstr[1024] = {0};
+ int64_t from = 0;
- int64_t start_time, end_time;
- if (strlen(g_args.humanStartTime)) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- g_args.humanStartTime, &start_time, strlen(g_args.humanStartTime),
- precision, 0)) {
- errorPrint("Input %s, time format error!\n", g_args.humanStartTime);
+ for (int32_t t = 0; t < threads; ++t) {
+ pThread = infos + t;
+ pThread->threadIndex = t;
+
+        pThread->from = from;
+        pThread->count = (t < b) ? (a + 1) : a;
+        from += pThread->count;
+ verbosePrint(
+ "Thread[%d] takes care sql files total %"PRId64" files from %"PRId64"\n",
+ t, pThread->count, pThread->from);
+
+ pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (pThread->taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ free(infos);
+ free(pids);
return -1;
}
- } else {
- start_time = g_args.start_time;
- }
- if (strlen(g_args.humanEndTime)) {
- if (TSDB_CODE_SUCCESS != taosParseTime(
- g_args.humanEndTime, &end_time, strlen(g_args.humanEndTime),
- precision, 0)) {
- errorPrint("Input %s, time format error!\n", g_args.humanEndTime);
- return -1;
+ if (pthread_create(pids + t, NULL,
+ dumpInSqlWorkThreadFp, (void*)pThread) != 0) {
+ errorPrint("%s() LN%d, thread[%d] failed to start\n",
+ __func__, __LINE__, pThread->threadIndex);
+ exit(EXIT_FAILURE);
}
- } else {
- end_time = g_args.end_time;
}
- sprintf(sqlstr,
- "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;",
- dbName, tbName, start_time, end_time);
-
- TAOS *taos = taos_connect(g_args.host,
- g_args.user, g_args.password, dbName, g_args.port);
- if (NULL == taos) {
- errorPrint(
- "Failed to connect to TDengine server %s by specified database %s\n",
- g_args.host, dbName);
- return -1;
+ for (int t = 0; t < threads; ++t) {
+ pthread_join(pids[t], NULL);
}
- TAOS_RES* res = taos_query(taos, sqlstr);
- int32_t code = taos_errno(res);
- if (code != 0) {
- errorPrint("failed to run command %s, reason: %s\n",
- sqlstr, taos_errstr(res));
- taos_free_result(res);
- taos_close(taos);
- return -1;
+ for (int t = 0; t < threads; ++t) {
+ taos_close(infos[t].taos);
}
+ free(infos);
+ free(pids);
- if (g_args.avro) {
- writeSchemaToAvro(jsonAvroSchema);
- totalRows = writeResultToAvro(res);
- } else {
- totalRows = writeResultToSql(res, fp, dbName, tbName);
- }
+ freeFileList(g_tsDumpInSqlFiles, sqlFileCount);
- taos_free_result(res);
- taos_close(taos);
- return totalRows;
+ return 0;
}
-static int checkParam() {
- if (g_args.all_databases && g_args.databases) {
- errorPrint("%s", "conflict option --all-databases and --databases\n");
- return -1;
- }
+static int dumpInDbs()
+{
+ TAOS *taos = taos_connect(
+ g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
- if (g_args.start_time > g_args.end_time) {
- errorPrint("%s", "start time is larger than end time\n");
+ if (taos == NULL) {
+ errorPrint("%s() LN%d, failed to connect to TDengine server\n",
+ __func__, __LINE__);
return -1;
}
- if (g_args.arg_list_len == 0) {
- if ((!g_args.all_databases) && (!g_args.databases) && (!g_args.isDumpIn)) {
- errorPrint("%s", "taosdump requires parameters\n");
- return -1;
- }
- }
- /*
- if (g_args.isDumpIn && (strcmp(g_args.outpath, DEFAULT_DUMP_FILE) != 0)) {
- fprintf(stderr, "duplicate parameter input and output file path\n");
- return -1;
- }
- */
- if (!g_args.isDumpIn && g_args.encode != NULL) {
- fprintf(stderr, "invalid option in dump out\n");
+ char dbsSql[MAX_PATH_LEN];
+ sprintf(dbsSql, "%s/%s", g_args.inpath, "dbs.sql");
+
+ FILE *fp = openDumpInFile(dbsSql);
+ if (NULL == fp) {
+ errorPrint("%s() LN%d, failed to open input file %s\n",
+ __func__, __LINE__, dbsSql);
return -1;
}
+ debugPrint("Success Open input file: %s\n", dbsSql);
+ loadFileCharset(fp, g_tsCharset);
- if (g_args.table_batch <= 0) {
- fprintf(stderr, "invalid option in dump out\n");
- return -1;
+ if(0 == dumpInOneSqlFile(taos, fp, g_tsCharset, g_args.encode, dbsSql)) {
+ okPrint("Success dump in file: %s !\n", dbsSql);
}
+ fclose(fp);
+ taos_close(taos);
+
return 0;
}
-/*
-static bool isEmptyCommand(char *cmd) {
- char *pchar = cmd;
+static int64_t dumpIn() {
+ assert(g_args.isDumpIn);
- while (*pchar != '\0') {
- if (*pchar != ' ') return false;
- pchar++;
- }
+ int64_t ret = 0;
+ if (dumpInDbs()) {
+ errorPrint("%s", "Failed to dump dbs in!\n");
+ exit(EXIT_FAILURE);
+ }
- return true;
+ ret = dumpInSqlWorkThreads();
+
+#ifdef AVRO_SUPPORT
+ if (0 == ret) {
+ ret = dumpInAvroWorkThreads();
+ }
+#endif
+
+ return ret;
}
-static void taosReplaceCtrlChar(char *str) {
- bool ctrlOn = false;
- char *pstr = NULL;
+static void *dumpNormalTablesOfStb(void *arg) {
+ threadInfo *pThreadInfo = (threadInfo *)arg;
- for (pstr = str; *str != '\0'; ++str) {
- if (ctrlOn) {
- switch (*str) {
- case 'n':
- *pstr = '\n';
- pstr++;
- break;
- case 'r':
- *pstr = '\r';
- pstr++;
- break;
- case 't':
- *pstr = '\t';
- pstr++;
- break;
- case '\\':
- *pstr = '\\';
- pstr++;
- break;
- case '\'':
- *pstr = '\'';
- pstr++;
- break;
- default:
- break;
- }
- ctrlOn = false;
- } else {
- if (*str == '\\') {
- ctrlOn = true;
- } else {
- *pstr = *str;
- pstr++;
- }
+ debugPrint("dump table from = \t%"PRId64"\n", pThreadInfo->from);
+ debugPrint("dump table count = \t%"PRId64"\n", pThreadInfo->count);
+
+ char command[COMMAND_SIZE];
+
+ sprintf(command, "SELECT TBNAME FROM %s.%s LIMIT %"PRId64" OFFSET %"PRId64"",
+ pThreadInfo->dbName, pThreadInfo->stbName,
+ pThreadInfo->count, pThreadInfo->from);
+
+ TAOS_RES *res = taos_query(pThreadInfo->taos, command);
+ int32_t code = taos_errno(res);
+ if (code) {
+ errorPrint("%s() LN%d, failed to run command <%s>. reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(res));
+ taos_free_result(res);
+ return NULL;
}
- }
- *pstr = '\0';
-}
-*/
+ FILE *fp = NULL;
+ char tmpBuf[MAX_PATH_LEN] = {0};
-char *ascii_literal_list[] = {
- "\\x00", "\\x01", "\\x02", "\\x03", "\\x04", "\\x05", "\\x06", "\\x07", "\\x08", "\\t", "\\n", "\\x0b", "\\x0c",
- "\\r", "\\x0e", "\\x0f", "\\x10", "\\x11", "\\x12", "\\x13", "\\x14", "\\x15", "\\x16", "\\x17", "\\x18", "\\x19",
- "\\x1a", "\\x1b", "\\x1c", "\\x1d", "\\x1e", "\\x1f", " ", "!", "\\\"", "#", "$", "%", "&",
- "\\'", "(", ")", "*", "+", ",", "-", ".", "/", "0", "1", "2", "3",
- "4", "5", "6", "7", "8", "9", ":", ";", "<", "=", ">", "?", "@",
- "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
- "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
- "[", "\\\\", "]", "^", "_", "`", "a", "b", "c", "d", "e", "f", "g",
- "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
- "u", "v", "w", "x", "y", "z", "{", "|", "}", "~", "\\x7f", "\\x80", "\\x81",
- "\\x82", "\\x83", "\\x84", "\\x85", "\\x86", "\\x87", "\\x88", "\\x89", "\\x8a", "\\x8b", "\\x8c", "\\x8d", "\\x8e",
- "\\x8f", "\\x90", "\\x91", "\\x92", "\\x93", "\\x94", "\\x95", "\\x96", "\\x97", "\\x98", "\\x99", "\\x9a", "\\x9b",
- "\\x9c", "\\x9d", "\\x9e", "\\x9f", "\\xa0", "\\xa1", "\\xa2", "\\xa3", "\\xa4", "\\xa5", "\\xa6", "\\xa7", "\\xa8",
- "\\xa9", "\\xaa", "\\xab", "\\xac", "\\xad", "\\xae", "\\xaf", "\\xb0", "\\xb1", "\\xb2", "\\xb3", "\\xb4", "\\xb5",
- "\\xb6", "\\xb7", "\\xb8", "\\xb9", "\\xba", "\\xbb", "\\xbc", "\\xbd", "\\xbe", "\\xbf", "\\xc0", "\\xc1", "\\xc2",
- "\\xc3", "\\xc4", "\\xc5", "\\xc6", "\\xc7", "\\xc8", "\\xc9", "\\xca", "\\xcb", "\\xcc", "\\xcd", "\\xce", "\\xcf",
- "\\xd0", "\\xd1", "\\xd2", "\\xd3", "\\xd4", "\\xd5", "\\xd6", "\\xd7", "\\xd8", "\\xd9", "\\xda", "\\xdb", "\\xdc",
- "\\xdd", "\\xde", "\\xdf", "\\xe0", "\\xe1", "\\xe2", "\\xe3", "\\xe4", "\\xe5", "\\xe6", "\\xe7", "\\xe8", "\\xe9",
- "\\xea", "\\xeb", "\\xec", "\\xed", "\\xee", "\\xef", "\\xf0", "\\xf1", "\\xf2", "\\xf3", "\\xf4", "\\xf5", "\\xf6",
- "\\xf7", "\\xf8", "\\xf9", "\\xfa", "\\xfb", "\\xfc", "\\xfd", "\\xfe", "\\xff"};
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/%s.%s.%d.sql",
+ g_args.outpath,
+ pThreadInfo->dbName,
+ pThreadInfo->stbName,
+ pThreadInfo->threadIndex);
+ } else {
+ sprintf(tmpBuf, "%s.%s.%d.sql",
+ pThreadInfo->dbName,
+ pThreadInfo->stbName,
+ pThreadInfo->threadIndex);
+ }
-static int converStringToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- while (size > 0) {
- if (*pstr == '\0') break;
- pbuf = stpcpy(pbuf, ascii_literal_list[((uint8_t)(*pstr))]);
- pstr++;
- size--;
+ fp = fopen(tmpBuf, "w");
+
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return NULL;
}
- *pbuf = '\0';
- return 0;
-}
-static int convertNCharToReadable(char *str, int size, char *buf, int bufsize) {
- char *pstr = str;
- char *pbuf = buf;
- wchar_t wc;
- while (size > 0) {
- if (*pstr == '\0') break;
- int byte_width = mbtowc(&wc, pstr, MB_CUR_MAX);
- if (byte_width < 0) {
- errorPrint("%s() LN%d, mbtowc() return fail.\n", __func__, __LINE__);
- exit(-1);
- }
+ TAOS_ROW row = NULL;
+ int64_t i = 0;
+ int64_t count;
+ while((row = taos_fetch_row(res)) != NULL) {
+ debugPrint("[%d] sub table %"PRId64": name: %s\n",
+ pThreadInfo->threadIndex, i++, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
- if ((int)wc < 256) {
- pbuf = stpcpy(pbuf, ascii_literal_list[(int)wc]);
+ count = dumpNormalTable(
+ pThreadInfo->taos,
+ pThreadInfo->dbName,
+ pThreadInfo->stbName,
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX],
+ pThreadInfo->precision,
+ fp);
+ if (count < 0) {
+ break;
} else {
- memcpy(pbuf, pstr, byte_width);
- pbuf += byte_width;
+ atomic_add_fetch_64(&g_totalDumpOutRows, count);
}
- pstr += byte_width;
}
- *pbuf = '\0';
+ fclose(fp);
+ return NULL;
+}
+
+static int64_t dumpNtbOfDbByThreads(
+ SDbInfo *dbInfo,
+ int64_t ntbCount)
+{
+ if (ntbCount <= 0) {
+ return 0;
+ }
+
+ int threads = g_args.thread_num;
- return 0;
-}
+ int64_t a = ntbCount / threads;
+ if (a < 1) {
+ threads = ntbCount;
+ a = 1;
+ }
-static void dumpCharset(FILE *fp) {
- char charsetline[256];
+ assert(threads);
+ int64_t b = ntbCount % threads;
- (void)fseek(fp, 0, SEEK_SET);
- sprintf(charsetline, "#!%s\n", tsCharset);
- (void)fwrite(charsetline, strlen(charsetline), 1, fp);
-}
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ assert(pids);
+ assert(infos);
-static void loadFileCharset(FILE *fp, char *fcharset) {
- char * line = NULL;
- size_t line_size = 0;
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ pThreadInfo->taos = taos_connect(
+ g_args.host,
+ g_args.user,
+ g_args.password,
+ dbInfo->name,
+ g_args.port
+ );
+ if (NULL == pThreadInfo->taos) {
+ errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+ __func__,
+ __LINE__,
+ taos_errstr(NULL));
+ free(pids);
+ free(infos);
- (void)fseek(fp, 0, SEEK_SET);
- ssize_t size = getline(&line, &line_size, fp);
- if (size <= 2) {
- goto _exit_no_charset;
+ return -1;
+ }
+
+ pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i < b) ? (a + 1) : a;
+        pThreadInfo->from = (i == 0) ? 0 :
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+ strcpy(pThreadInfo->dbName, dbInfo->name);
+ pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
+
+ pthread_create(pids + i, NULL, dumpNtbOfDb, pThreadInfo);
}
- if (strncmp(line, "#!", 2) != 0) {
- goto _exit_no_charset;
+ for (int64_t i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
}
- if (line[size - 1] == '\n') {
- line[size - 1] = '\0';
- size--;
+
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ taos_close(pThreadInfo->taos);
}
- strcpy(fcharset, line + 2);
- tfree(line);
- return;
+ free(pids);
+ free(infos);
-_exit_no_charset:
- (void)fseek(fp, 0, SEEK_SET);
- *fcharset = '\0';
- tfree(line);
- return;
+ return 0;
}
-// ======== dumpIn support multi threads functions ================================//
-
-static char **g_tsDumpInSqlFiles = NULL;
-static int32_t g_tsSqlFileNum = 0;
-static char g_tsDbSqlFile[MAX_FILE_NAME_LEN] = {0};
-static char g_tsCharset[64] = {0};
-
-static int taosGetFilesNum(const char *directoryName,
- const char *prefix, const char *prefix2)
+static int64_t dumpNTablesOfDb(SDbInfo *dbInfo)
{
- char cmd[1024] = { 0 };
+ TAOS *taos = taos_connect(g_args.host,
+ g_args.user, g_args.password, dbInfo->name, g_args.port);
+ if (NULL == taos) {
+ errorPrint(
+ "Failed to connect to TDengine server %s by specified database %s\n",
+ g_args.host, dbInfo->name);
+ return 0;
+ }
- if (prefix2)
- sprintf(cmd, "ls %s/*.%s %s/*.%s | wc -l ",
- directoryName, prefix, directoryName, prefix2);
- else
- sprintf(cmd, "ls %s/*.%s | wc -l ", directoryName, prefix);
+ char command[COMMAND_SIZE];
+ TAOS_RES *result;
+ int32_t code;
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(-1);
+ sprintf(command, "USE %s", dbInfo->name);
+ result = taos_query(taos, command);
+ code = taos_errno(result);
+ if (code != 0) {
+ errorPrint("invalid database %s, reason: %s\n",
+ dbInfo->name, taos_errstr(result));
+ taos_close(taos);
+ return 0;
}
- int fileNum = 0;
- if (fscanf(fp, "%d", &fileNum) != 1) {
- errorPrint("failed to execute:%s, parse result error\n", cmd);
- exit(-1);
+ sprintf(command, "SHOW TABLES");
+ result = taos_query(taos, command);
+ code = taos_errno(result);
+ if (code != 0) {
+ errorPrint("Failed to show %s\'s tables, reason: %s\n",
+ dbInfo->name, taos_errstr(result));
+ taos_close(taos);
+ return 0;
}
- if (fileNum <= 0) {
- errorPrint("directory:%s is empty\n", directoryName);
- exit(-1);
+ g_tablesList = calloc(1, dbInfo->ntables * sizeof(TableInfo));
+ assert(g_tablesList);
+
+ TAOS_ROW row;
+ int64_t count = 0;
+ while(NULL != (row = taos_fetch_row(result))) {
+ debugPrint("%s() LN%d, No.\t%"PRId64" table name: %s\n",
+ __func__, __LINE__,
+ count, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX]);
+ tstrncpy(((TableInfo *)(g_tablesList + count))->name,
+ (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], TSDB_TABLE_NAME_LEN);
+ char *stbName = (char *) row[TSDB_SHOW_TABLES_METRIC_INDEX];
+ if (stbName) {
+ tstrncpy(((TableInfo *)(g_tablesList + count))->stable,
+ (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], TSDB_TABLE_NAME_LEN);
+ ((TableInfo *)(g_tablesList + count))->belongStb = true;
+ }
+ count ++;
}
+ taos_close(taos);
+
+ int64_t records = dumpNtbOfDbByThreads(dbInfo, count);
+
+ free(g_tablesList);
+ g_tablesList = NULL;
- pclose(fp);
- return fileNum;
+ return records;
}
-static void taosParseDirectory(const char *directoryName,
- const char *prefix, const char *prefix2,
- char **fileArray, int totalFiles)
+static int64_t dumpNtbOfStbByThreads(
+ SDbInfo *dbInfo, char *stbName)
{
- char cmd[1024] = { 0 };
+ int64_t ntbCount = getNtbCountOfStb(dbInfo->name, stbName);
- if (prefix2) {
- sprintf(cmd, "ls %s/*.%s %s/*.%s | sort",
- directoryName, prefix, directoryName, prefix2);
- } else {
- sprintf(cmd, "ls %s/*.%s | sort", directoryName, prefix);
+ if (ntbCount <= 0) {
+ return 0;
}
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(-1);
- }
+ int threads = g_args.thread_num;
- int fileNum = 0;
- while (fscanf(fp, "%128s", fileArray[fileNum++])) {
- if (strcmp(fileArray[fileNum-1], g_tsDbSqlFile) == 0) {
- fileNum--;
- }
- if (fileNum >= totalFiles) {
- break;
- }
+ int64_t a = ntbCount / threads;
+ if (a < 1) {
+ threads = ntbCount;
+ a = 1;
}
- if (fileNum != totalFiles) {
- errorPrint("directory:%s changed while read\n", directoryName);
- pclose(fp);
- exit(-1);
- }
+ assert(threads);
+ int64_t b = ntbCount % threads;
- pclose(fp);
-}
+ pthread_t *pids = calloc(1, threads * sizeof(pthread_t));
+ threadInfo *infos = calloc(1, threads * sizeof(threadInfo));
+ assert(pids);
+ assert(infos);
-static void taosCheckDatabasesSQLFile(const char *directoryName)
-{
- char cmd[1024] = { 0 };
- sprintf(cmd, "ls %s/dbs.sql", directoryName);
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ pThreadInfo->taos = taos_connect(
+ g_args.host,
+ g_args.user,
+ g_args.password,
+ dbInfo->name,
+ g_args.port
+ );
+ if (NULL == pThreadInfo->taos) {
+ errorPrint("%s() LN%d, Failed to connect to TDengine, reason: %s\n",
+ __func__,
+ __LINE__,
+ taos_errstr(NULL));
+ free(pids);
+ free(infos);
- FILE *fp = popen(cmd, "r");
- if (fp == NULL) {
- errorPrint("failed to execute:%s, error:%s\n", cmd, strerror(errno));
- exit(-1);
- }
+ return -1;
+ }
- while (fscanf(fp, "%128s", g_tsDbSqlFile)) {
- break;
- }
+ pThreadInfo->threadIndex = i;
+        pThreadInfo->count = (i<b)?a+1:a;
+        pThreadInfo->from = (i==0)?0:
+            ((threadInfo *)(infos + i - 1))->from +
+            ((threadInfo *)(infos + i - 1))->count;
+ strcpy(pThreadInfo->dbName, dbInfo->name);
+ pThreadInfo->precision = getPrecisionByString(dbInfo->precision);
- pclose(fp);
-}
+ strcpy(pThreadInfo->stbName, stbName);
+ pthread_create(pids + i, NULL, dumpNormalTablesOfStb, pThreadInfo);
+ }
-static void taosMallocDumpFiles()
-{
- g_tsDumpInSqlFiles = (char**)calloc(g_tsSqlFileNum, sizeof(char*));
- for (int i = 0; i < g_tsSqlFileNum; i++) {
- g_tsDumpInSqlFiles[i] = calloc(1, MAX_FILE_NAME_LEN);
+ for (int64_t i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
}
-}
-static void freeDumpFiles()
-{
- for (int i = 0; i < g_tsSqlFileNum; i++) {
- tfree(g_tsDumpInSqlFiles[i]);
+ int64_t records = 0;
+ for (int64_t i = 0; i < threads; i++) {
+ threadInfo *pThreadInfo = infos + i;
+ records += pThreadInfo->rowsOfDumpOut;
+ taos_close(pThreadInfo->taos);
}
- tfree(g_tsDumpInSqlFiles);
+
+ free(pids);
+ free(infos);
+
+ return records;
}
-static void taosGetDirectoryFileList(char *inputDir)
+static int64_t dumpWholeDatabase(SDbInfo *dbInfo, FILE *fp)
{
- struct stat fileStat;
- if (stat(inputDir, &fileStat) < 0) {
- errorPrint("%s not exist\n", inputDir);
- exit(-1);
- }
+ dumpCreateDbClause(dbInfo, g_args.with_property, fp);
- if (fileStat.st_mode & S_IFDIR) {
- taosCheckDatabasesSQLFile(inputDir);
- if (g_args.avro)
- g_tsSqlFileNum = taosGetFilesNum(inputDir, "sql", "avro");
- else
- g_tsSqlFileNum += taosGetFilesNum(inputDir, "sql", NULL);
+ fprintf(g_fpOfResult, "\n#### database: %s\n",
+ dbInfo->name);
+ g_resultStatistics.totalDatabasesOfDumpOut++;
- int tsSqlFileNumOfTbls = g_tsSqlFileNum;
- if (g_tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
- }
- taosMallocDumpFiles();
- if (0 != tsSqlFileNumOfTbls) {
- if (g_args.avro) {
- taosParseDirectory(inputDir, "sql", "avro",
- g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
- } else {
- taosParseDirectory(inputDir, "sql", NULL,
- g_tsDumpInSqlFiles, tsSqlFileNumOfTbls);
- }
- }
- fprintf(stdout, "\nstart to dispose %d files in %s\n",
- g_tsSqlFileNum, inputDir);
- } else {
- errorPrint("%s is not a directory\n", inputDir);
- exit(-1);
- }
-}
+ dumpCreateSTableClauseOfDb(dbInfo, fp);
-static FILE* taosOpenDumpInFile(char *fptr) {
- wordexp_t full_path;
+ return dumpNTablesOfDb(dbInfo);
+}
- if (wordexp(fptr, &full_path, 0) != 0) {
- errorPrint("illegal file name: %s\n", fptr);
- return NULL;
- }
+static int dumpOut() {
+ TAOS *taos = NULL;
+ TAOS_RES *result = NULL;
- char *fname = full_path.we_wordv[0];
+ TAOS_ROW row;
+ FILE *fp = NULL;
+ int32_t count = 0;
- FILE *f = NULL;
- if ((fname) && (strlen(fname) > 0)) {
- f = fopen(fname, "r");
- if (f == NULL) {
- errorPrint("%s() LN%d, failed to open file %s\n",
- __func__, __LINE__, fname);
- }
+ char tmpBuf[MAX_PATH_LEN] = {0};
+ if (g_args.outpath[0] != 0) {
+ sprintf(tmpBuf, "%s/dbs.sql", g_args.outpath);
+ } else {
+ sprintf(tmpBuf, "dbs.sql");
}
- wordfree(&full_path);
- return f;
-}
+ fp = fopen(tmpBuf, "w");
+ if (fp == NULL) {
+ errorPrint("%s() LN%d, failed to open file %s\n",
+ __func__, __LINE__, tmpBuf);
+ return -1;
+ }
-static int dumpInOneFile(TAOS* taos, FILE* fp, char* fcharset,
- char* encode, char* fileName) {
- int read_len = 0;
- char * cmd = NULL;
- size_t cmd_len = 0;
- char * line = NULL;
- size_t line_len = 0;
+ g_args.dumpDbCount = getDumpDbCount();
+ debugPrint("%s() LN%d, dump db count: %d\n",
+ __func__, __LINE__, g_args.dumpDbCount);
- cmd = (char *)malloc(TSDB_MAX_ALLOWED_SQL_LEN);
- if (cmd == NULL) {
- errorPrint("%s() LN%d, failed to allocate memory\n",
- __func__, __LINE__);
+ if (0 == g_args.dumpDbCount) {
+ errorPrint("%d databases valid to dump\n", g_args.dumpDbCount);
+ fclose(fp);
return -1;
}
- int lastRowsPrint = 5000000;
- int lineNo = 0;
- while ((read_len = getline(&line, &line_len, fp)) != -1) {
- ++lineNo;
- if (read_len >= TSDB_MAX_ALLOWED_SQL_LEN) continue;
- line[--read_len] = '\0';
+ g_dbInfos = (SDbInfo **)calloc(g_args.dumpDbCount, sizeof(SDbInfo *));
+ if (g_dbInfos == NULL) {
+ errorPrint("%s() LN%d, failed to allocate memory\n",
+ __func__, __LINE__);
+ goto _exit_failure;
+ }
- //if (read_len == 0 || isCommentLine(line)) { // line starts with #
- if (read_len == 0 ) {
- continue;
- }
+ char command[COMMAND_SIZE];
- if (line[read_len - 1] == '\\') {
- line[read_len - 1] = ' ';
- memcpy(cmd + cmd_len, line, read_len);
- cmd_len += read_len;
- continue;
- }
+ /* Connect to server */
+ taos = taos_connect(g_args.host, g_args.user, g_args.password,
+ NULL, g_args.port);
+ if (taos == NULL) {
+ errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
+ goto _exit_failure;
+ }
- memcpy(cmd + cmd_len, line, read_len);
- cmd[read_len + cmd_len]= '\0';
- if (queryDbImpl(taos, cmd)) {
- errorPrint("%s() LN%d, error sql: lineno:%d, file:%s\n",
- __func__, __LINE__, lineNo, fileName);
- fprintf(g_fpOfResult, "error sql: lineno:%d, file:%s\n", lineNo, fileName);
- }
+ /* --------------------------------- Main Code -------------------------------- */
+ /* if (g_args.databases || g_args.all_databases) { // dump part of databases or all databases */
+ /* */
+ dumpCharset(fp);
- memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN);
- cmd_len = 0;
+ sprintf(command, "show databases");
+ result = taos_query(taos, command);
+ int32_t code = taos_errno(result);
- if (lineNo >= lastRowsPrint) {
- printf(" %d lines already be executed from file %s\n", lineNo, fileName);
- lastRowsPrint += 5000000;
- }
+ if (code != 0) {
+ errorPrint("%s() LN%d, failed to run command <%s>, reason: %s\n",
+ __func__, __LINE__, command, taos_errstr(result));
+ goto _exit_failure;
}
- tfree(cmd);
- tfree(line);
- fclose(fp);
- return 0;
-}
+ TAOS_FIELD *fields = taos_fetch_fields(result);
-static void* dumpInWorkThreadFp(void *arg)
-{
- threadInfo *pThread = (threadInfo*)arg;
- setThreadName("dumpInWorkThrd");
+ while ((row = taos_fetch_row(result)) != NULL) {
+        // skip the system database (named 'log') unless dumping
+        // system databases is explicitly allowed (--allow-sys)
+        if ((strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log",
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0)
+ && (!g_args.allow_sys)) {
+ continue;
+ }
- for (int32_t f = 0; f < g_tsSqlFileNum; ++f) {
- if (f % pThread->totalThreads == pThread->threadIndex) {
- char *SQLFileName = g_tsDumpInSqlFiles[f];
- FILE* fp = taosOpenDumpInFile(SQLFileName);
- if (NULL == fp) {
+ if (g_args.databases) { // input multi dbs
+ if (inDatabasesSeq(
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0) {
continue;
}
- fprintf(stderr, ", Success Open input file: %s\n",
- SQLFileName);
- dumpInOneFile(pThread->taos, fp, g_tsCharset, g_args.encode, SQLFileName);
+ } else if (!g_args.all_databases) { // only input one db
+ if (strncasecmp(g_args.arg_list[0],
+ (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes) != 0)
+ continue;
}
- }
- return NULL;
-}
-
-static void startDumpInWorkThreads()
-{
- pthread_attr_t thattr;
- threadInfo *pThread;
- int32_t totalThreads = g_args.thread_num;
-
- if (totalThreads > g_tsSqlFileNum) {
- totalThreads = g_tsSqlFileNum;
- }
+ g_dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo));
+ if (g_dbInfos[count] == NULL) {
+ errorPrint("%s() LN%d, failed to allocate %"PRIu64" memory\n",
+ __func__, __LINE__, (uint64_t)sizeof(SDbInfo));
+ goto _exit_failure;
+ }
- threadInfo *threadObj = (threadInfo *)calloc(
- totalThreads, sizeof(threadInfo));
+ okPrint("%s exists\n", (char *)row[TSDB_SHOW_DB_NAME_INDEX]);
+ tstrncpy(g_dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX],
+ min(TSDB_DB_NAME_LEN,
+ fields[TSDB_SHOW_DB_NAME_INDEX].bytes + 1));
+ if (g_args.with_property) {
+ g_dbInfos[count]->ntables =
+ *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]);
+ g_dbInfos[count]->vgroups =
+ *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]);
+ g_dbInfos[count]->replica =
+ *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]);
+ g_dbInfos[count]->quorum =
+ *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]);
+ g_dbInfos[count]->days =
+ *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]);
- if (NULL == threadObj) {
- errorPrint("%s() LN%d, memory allocation failed\n", __func__, __LINE__);
- }
+ tstrncpy(g_dbInfos[count]->keeplist,
+ (char *)row[TSDB_SHOW_DB_KEEP_INDEX],
+ min(32, fields[TSDB_SHOW_DB_KEEP_INDEX].bytes + 1));
+ //g_dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]);
+ //g_dbInfos[count]->daysToKeep1;
+ //g_dbInfos[count]->daysToKeep2;
+ g_dbInfos[count]->cache =
+ *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]);
+ g_dbInfos[count]->blocks =
+ *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]);
+ g_dbInfos[count]->minrows =
+ *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]);
+ g_dbInfos[count]->maxrows =
+ *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]);
+ g_dbInfos[count]->wallevel =
+ *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]);
+ g_dbInfos[count]->fsync =
+ *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]);
+ g_dbInfos[count]->comp =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX]));
+ g_dbInfos[count]->cachelast =
+ (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX]));
- for (int32_t t = 0; t < totalThreads; ++t) {
- pThread = threadObj + t;
- pThread->threadIndex = t;
- pThread->totalThreads = totalThreads;
- pThread->taos = taos_connect(g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (pThread->taos == NULL) {
- errorPrint("Failed to connect to TDengine server %s\n", g_args.host);
- free(threadObj);
- return;
+ tstrncpy(g_dbInfos[count]->precision,
+ (char *)row[TSDB_SHOW_DB_PRECISION_INDEX],
+ DB_PRECISION_LEN);
+ g_dbInfos[count]->update =
+ *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]);
}
- pthread_attr_init(&thattr);
- pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+ count++;
- if (pthread_create(&(pThread->threadID), &thattr,
- dumpInWorkThreadFp, (void*)pThread) != 0) {
- errorPrint("%s() LN%d, thread:%d failed to start\n",
- __func__, __LINE__, pThread->threadIndex);
- exit(0);
+ if (g_args.databases) {
+ if (count > g_args.dumpDbCount)
+ break;
+ } else if (!g_args.all_databases) {
+ if (count >= 1)
+ break;
}
}
- for (int t = 0; t < totalThreads; ++t) {
- pthread_join(threadObj[t].threadID, NULL);
- }
-
- for (int t = 0; t < totalThreads; ++t) {
- taos_close(threadObj[t].taos);
+ if (count == 0) {
+ errorPrint("%d databases valid to dump\n", count);
+ goto _exit_failure;
}
- free(threadObj);
-}
-
-static int dumpIn() {
- assert(g_args.isDumpIn);
- TAOS *taos = NULL;
- FILE *fp = NULL;
+ if (g_args.databases || g_args.all_databases) { // case: taosdump --databases dbx,dby ... OR taosdump --all-databases
+ for (int i = 0; i < count; i++) {
+ int64_t records = 0;
+ records = dumpWholeDatabase(g_dbInfos[i], fp);
+ if (records >= 0) {
+ okPrint("Database %s dumped\n", g_dbInfos[i]->name);
+ g_totalDumpOutRows += records;
+ }
+ }
+ } else {
+ if (1 == g_args.arg_list_len) {
+ int64_t records = dumpWholeDatabase(g_dbInfos[0], fp);
+ if (records >= 0) {
+ okPrint("Database %s dumped\n", g_dbInfos[0]->name);
+ g_totalDumpOutRows += records;
+ }
+ } else {
+ dumpCreateDbClause(g_dbInfos[0], g_args.with_property, fp);
+ }
- taos = taos_connect(
- g_args.host, g_args.user, g_args.password,
- NULL, g_args.port);
- if (taos == NULL) {
- errorPrint("%s() LN%d, failed to connect to TDengine server\n",
- __func__, __LINE__);
- return -1;
- }
+ int superTblCnt = 0 ;
+ for (int i = 1; g_args.arg_list[i]; i++) {
+ TableRecordInfo tableRecordInfo;
- taosGetDirectoryFileList(g_args.inpath);
+ if (getTableRecordInfo(g_dbInfos[0]->name,
+ g_args.arg_list[i],
+ &tableRecordInfo) < 0) {
+ errorPrint("input the invalid table %s\n",
+ g_args.arg_list[i]);
+ continue;
+ }
- int32_t tsSqlFileNumOfTbls = g_tsSqlFileNum;
- if (g_tsDbSqlFile[0] != 0) {
- tsSqlFileNumOfTbls--;
+ int64_t records = 0;
+ if (tableRecordInfo.isStb) { // dump all table of this stable
+ int ret = dumpStableClasuse(
+ taos,
+ g_dbInfos[0],
+ tableRecordInfo.tableRecord.stable,
+ fp);
+ if (ret >= 0) {
+ superTblCnt++;
+ records = dumpNtbOfStbByThreads(g_dbInfos[0], g_args.arg_list[i]);
+ }
+ } else if (tableRecordInfo.belongStb){
+ dumpStableClasuse(
+ taos,
+ g_dbInfos[0],
+ tableRecordInfo.tableRecord.stable,
+ fp);
+ records = dumpNormalTableBelongStb(
+ taos,
+ g_dbInfos[0],
+ tableRecordInfo.tableRecord.stable,
+ g_args.arg_list[i]);
+ } else {
+ records = dumpNormalTableWithoutStb(taos, g_dbInfos[0], g_args.arg_list[i]);
+ }
- fp = taosOpenDumpInFile(g_tsDbSqlFile);
- if (NULL == fp) {
- errorPrint("%s() LN%d, failed to open input file %s\n",
- __func__, __LINE__, g_tsDbSqlFile);
- return -1;
+ if (records >= 0) {
+ okPrint("table: %s dumped\n", g_args.arg_list[i]);
+ g_totalDumpOutRows += records;
+ }
}
- fprintf(stderr, "Success Open input file: %s\n", g_tsDbSqlFile);
-
- loadFileCharset(fp, g_tsCharset);
-
- dumpInOneFile(taos, fp, g_tsCharset, g_args.encode,
- g_tsDbSqlFile);
}
taos_close(taos);
- if (0 != tsSqlFileNumOfTbls) {
- startDumpInWorkThreads();
- }
-
- freeDumpFiles();
+ /* Close the handle and return */
+ fclose(fp);
+ taos_free_result(result);
+ freeDbInfos();
+ fprintf(stderr, "dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
return 0;
+
+_exit_failure:
+ fclose(fp);
+ taos_close(taos);
+ taos_free_result(result);
+ freeDbInfos();
+ errorPrint("dump out rows: %" PRId64 "\n", g_totalDumpOutRows);
+ return -1;
}
int main(int argc, char *argv[]) {
@@ -2988,7 +3926,10 @@ int main(int argc, char *argv[]) {
printf("databasesSeq: %s\n", g_args.databasesSeq);
printf("schemaonly: %s\n", g_args.schemaonly?"true":"false");
printf("with_property: %s\n", g_args.with_property?"true":"false");
+#ifdef AVRO_SUPPORT
printf("avro format: %s\n", g_args.avro?"true":"false");
+ printf("avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
+#endif
printf("start_time: %" PRId64 "\n", g_args.start_time);
printf("human readable start time: %s \n", g_args.humanStartTime);
printf("end_time: %" PRId64 "\n", g_args.end_time);
@@ -3042,7 +3983,10 @@ int main(int argc, char *argv[]) {
fprintf(g_fpOfResult, "databasesSeq: %s\n", g_args.databasesSeq);
fprintf(g_fpOfResult, "schemaonly: %s\n", g_args.schemaonly?"true":"false");
fprintf(g_fpOfResult, "with_property: %s\n", g_args.with_property?"true":"false");
+#ifdef AVRO_SUPPORT
fprintf(g_fpOfResult, "avro format: %s\n", g_args.avro?"true":"false");
+ fprintf(g_fpOfResult, "avro codec: %s\n", g_avro_codec[g_args.avro_codec]);
+#endif
fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", g_args.start_time);
fprintf(g_fpOfResult, "human readable start time: %s \n", g_args.humanStartTime);
fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", g_args.end_time);
@@ -3072,6 +4016,7 @@ int main(int argc, char *argv[]) {
tm.tm_year + 1900, tm.tm_mon + 1,
tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
if (dumpIn() < 0) {
+ errorPrint("%s\n", "dumpIn() failed!");
ret = -1;
}
} else {
@@ -3103,4 +4048,3 @@ int main(int argc, char *argv[]) {
return ret;
}
-
diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c
index 9602e16483f3a367117f998cc619dbe408859d93..00471bbf042e147a210fb575e7372d9696c33617 100644
--- a/src/mnode/src/mnodeDb.c
+++ b/src/mnode/src/mnodeDb.c
@@ -927,9 +927,12 @@ static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg) {
pCreate->maxRowsPerFileBlock = htonl(pCreate->maxRowsPerFileBlock);
int32_t code;
+#ifdef GRANT_CHECK_WRITE
if (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS) {
code = TSDB_CODE_GRANT_EXPIRED;
- } else if (!pMsg->pUser->writeAuth) {
+ } // else
+#endif
+ if (!pMsg->pUser->writeAuth) {
code = TSDB_CODE_MND_NO_RIGHTS;
} else {
code = mnodeCreateDb(pMsg->pUser->pAcct, pCreate, pMsg);
diff --git a/src/mnode/src/mnodeFunc.c b/src/mnode/src/mnodeFunc.c
index 253958efbe9a0d81d0542217447eaf750e10caf9..7f3963fc7e39f75bca5b1090907ff49d7e10a504 100644
--- a/src/mnode/src/mnodeFunc.c
+++ b/src/mnode/src/mnodeFunc.c
@@ -191,9 +191,11 @@ static int32_t mnodeUpdateFunc(SFuncObj *pFunc, void *pMsg) {
}
*/
int32_t mnodeCreateFunc(SAcctObj *pAcct, char *name, int32_t codeLen, char *codeScript, char *path, uint8_t outputType, int16_t outputLen, int32_t funcType, int32_t bufSize, SMnodeMsg *pMsg) {
+#ifdef GRANT_CHECK_WRITE
if (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_GRANT_EXPIRED;
}
+#endif
if (!pMsg->pUser->writeAuth) {
return TSDB_CODE_MND_NO_RIGHTS;
diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c
index df3c49165ca00262016006e4450eb1ba574a5525..31de22ae41877dce95a83fab0d4cacd4c574706b 100644
--- a/src/mnode/src/mnodeMain.c
+++ b/src/mnode/src/mnodeMain.c
@@ -55,7 +55,7 @@ static SStep tsMnodeSteps[] = {
{"mnodes", mnodeInitMnodes, mnodeCleanupMnodes},
{"sdb", sdbInit, sdbCleanUp},
{"balance", bnInit, bnCleanUp},
- {"grant", grantInit, grantCleanUp},
+ // {"grant", grantInit, grantCleanUp},
{"show", mnodeInitShow, mnodeCleanUpShow}
};
diff --git a/src/mnode/src/mnodeWrite.c b/src/mnode/src/mnodeWrite.c
index 9a993dfaafab725847a43097497287fbe5642511..a954ecb5c2a746e252f0905eb86d8dd8f1a1fbee 100644
--- a/src/mnode/src/mnodeWrite.c
+++ b/src/mnode/src/mnodeWrite.c
@@ -65,14 +65,16 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) {
return TSDB_CODE_MND_MSG_NOT_PROCESSED;
}
+#ifdef GRANT_CHECK_WRITE
int32_t code = grantCheck(TSDB_GRANT_TIME);
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType],
tstrerror(code));
return code;
}
+#endif
- code = mnodeInitMsg(pMsg);
+ int32_t code = mnodeInitMsg(pMsg);
if (code != TSDB_CODE_SUCCESS) {
mError("msg:%p, app:%p type:%s not processed, reason:%s", pMsg, pMsg->rpcMsg.ahandle, taosMsg[pMsg->rpcMsg.msgType],
tstrerror(code));
diff --git a/src/os/src/darwin/dwSysInfo.c b/src/os/src/darwin/dwSysInfo.c
index 54c6fb1d32a124171d121cbc5bc8f2e9ac5661ac..a87a15a3f211768ecce747c7bc6ff236bad2f3ee 100644
--- a/src/os/src/darwin/dwSysInfo.c
+++ b/src/os/src/darwin/dwSysInfo.c
@@ -65,6 +65,8 @@ static void taosGetSystemTimezone() {
struct tm tm1;
localtime_r(&tx1, &tm1);
+ tsDaylight = daylight;
+
/*
* format example:
*
diff --git a/src/os/src/detail/osSysinfo.c b/src/os/src/detail/osSysinfo.c
index 8094358853947c22064c30f877baea1e5938b3d3..64d33be40ed09b2783b4c7e08b6c20618a43cf8a 100644
--- a/src/os/src/detail/osSysinfo.c
+++ b/src/os/src/detail/osSysinfo.c
@@ -213,6 +213,8 @@ static void taosGetSystemTimezone() {
int32_t tz = (-timezone * MILLISECOND_PER_SECOND) / MILLISECOND_PER_HOUR;
tz += daylight;
+ tsDaylight = daylight;
+
/*
* format example:
*
diff --git a/src/os/src/detail/osTime.c b/src/os/src/detail/osTime.c
index ca5ae77fd85c4fe97be48235ce56018e4ccae3f0..ba77885f02dddf6af6ee6ee7478d1087299a563b 100644
--- a/src/os/src/detail/osTime.c
+++ b/src/os/src/detail/osTime.c
@@ -380,15 +380,20 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
assert(toPrecision == TSDB_TIME_PRECISION_MILLI ||
toPrecision == TSDB_TIME_PRECISION_MICRO ||
toPrecision == TSDB_TIME_PRECISION_NANO);
+ double tempResult = (double)time;
switch(fromPrecision) {
case TSDB_TIME_PRECISION_MILLI: {
switch (toPrecision) {
case TSDB_TIME_PRECISION_MILLI:
return time;
case TSDB_TIME_PRECISION_MICRO:
- return time * 1000;
+ tempResult *= 1000;
+ time *= 1000;
+ goto end_;
case TSDB_TIME_PRECISION_NANO:
- return time * 1000000;
+ tempResult *= 1000000;
+ time *= 1000000;
+ goto end_;
}
} // end from milli
case TSDB_TIME_PRECISION_MICRO: {
@@ -398,7 +403,9 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
case TSDB_TIME_PRECISION_MICRO:
return time;
case TSDB_TIME_PRECISION_NANO:
- return time * 1000;
+ tempResult *= 1000;
+ time *= 1000;
+ goto end_;
}
} //end from micro
case TSDB_TIME_PRECISION_NANO: {
@@ -416,6 +423,10 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
return time; // only to pass windows compilation
}
} //end switch fromPrecision
+end_:
+ if (tempResult > (double)INT64_MAX) return INT64_MAX;
+ if (tempResult < (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL
+ return time;
}
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
diff --git a/src/os/src/windows/wSysinfo.c b/src/os/src/windows/wSysinfo.c
index 89101ee148b41c2aae667b04cf769f7e8503af08..831a6bdaf09c32e0e1a35bb240200de437b36ae4 100644
--- a/src/os/src/windows/wSysinfo.c
+++ b/src/os/src/windows/wSysinfo.c
@@ -91,6 +91,17 @@ static void taosGetSystemTimezone() {
strcpy(tsTimezone, tz);
}
cfg_timezone->cfgStatus = TAOS_CFG_CSTATUS_DEFAULT;
+
+#ifdef _MSC_VER
+#if _MSC_VER >= 1900
+ int64_t timezone = _timezone;
+ int32_t daylight = _daylight;
+ char **tzname = _tzname;
+#endif
+#endif
+
+ tsDaylight = daylight;
+
uInfo("timezone not configured, use default");
}
}
diff --git a/src/plugins/CMakeLists.txt b/src/plugins/CMakeLists.txt
index 4cf444bab2f05816c1af55d96156334800d758d5..075525a0684b332405a23011b0f7f501658d911e 100644
--- a/src/plugins/CMakeLists.txt
+++ b/src/plugins/CMakeLists.txt
@@ -1,26 +1,6 @@
CMAKE_MINIMUM_REQUIRED(VERSION 3.0...3.20)
PROJECT(TDengine)
-if(NOT WIN32)
- string(ASCII 27 Esc)
- set(ColourReset "${Esc}[m")
- set(ColourBold "${Esc}[1m")
- set(Red "${Esc}[31m")
- set(Green "${Esc}[32m")
- set(Yellow "${Esc}[33m")
- set(Blue "${Esc}[34m")
- set(Magenta "${Esc}[35m")
- set(Cyan "${Esc}[36m")
- set(White "${Esc}[37m")
- set(BoldRed "${Esc}[1;31m")
- set(BoldGreen "${Esc}[1;32m")
- set(BoldYellow "${Esc}[1;33m")
- set(BoldBlue "${Esc}[1;34m")
- set(BoldMagenta "${Esc}[1;35m")
- set(BoldCyan "${Esc}[1;36m")
- set(BoldWhite "${Esc}[1;37m")
-endif()
-
ADD_SUBDIRECTORY(monitor)
IF (TD_BUILD_HTTP)
@@ -30,35 +10,42 @@ IF (TD_BUILD_HTTP)
ADD_SUBDIRECTORY(http)
ELSE ()
MESSAGE("")
- MESSAGE("${Green} use blm3 as httpd ${ColourReset}")
+ MESSAGE("${Green} use taosadapter as httpd ${ColourReset}")
EXECUTE_PROCESS(
- COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/blm3
+ COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
)
EXECUTE_PROCESS(
COMMAND git rev-parse --short HEAD
RESULT_VARIABLE commit_sha1
- OUTPUT_VARIABLE blm3_commit_sha1
+ OUTPUT_VARIABLE taosadapter_commit_sha1
)
- IF ("${blm3_commit_sha1}" STREQUAL "")
- SET(blm3_commit_sha1 "unknown")
+ IF ("${taosadapter_commit_sha1}" STREQUAL "")
+ SET(taosadapter_commit_sha1 "unknown")
ELSE ()
- STRING(SUBSTRING "${blm3_commit_sha1}" 0 7 blm3_commit_sha1)
- STRING(STRIP "${blm3_commit_sha1}" blm3_commit_sha1)
+ STRING(SUBSTRING "${taosadapter_commit_sha1}" 0 7 taosadapter_commit_sha1)
+ STRING(STRIP "${taosadapter_commit_sha1}" taosadapter_commit_sha1)
ENDIF ()
- MESSAGE("${Green} blm3 commit: ${blm3_commit_sha1} ${ColourReset}")
+ MESSAGE("${Green} taosadapter commit: ${taosadapter_commit_sha1} ${ColourReset}")
EXECUTE_PROCESS(
COMMAND cd ..
)
include(ExternalProject)
- ExternalProject_Add(blm3
- PREFIX "blm3"
- SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/blm3
+ ExternalProject_Add(taosadapter
+ PREFIX "taosadapter"
+ SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
BUILD_ALWAYS off
DEPENDS taos
BUILD_IN_SOURCE 1
- CONFIGURE_COMMAND cmake -E echo "blm3 no need cmake to config"
- BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/blm3/version.CommitID=${blm3_commit_sha1}"
- INSTALL_COMMAND cmake -E copy blm3 ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/blm.toml ${CMAKE_BINARY_DIR}/test/cfg/
+ CONFIGURE_COMMAND cmake -E echo "taosadapter no need cmake to config"
+ PATCH_COMMAND
+ COMMAND git clean -f -d
+ BUILD_COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../inc CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -ldflags "-s -w -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ INSTALL_COMMAND
+ COMMAND curl -sL https://github.com/upx/upx/releases/download/v3.96/upx-3.96-amd64_linux.tar.xz -o upx.tar.xz && tar xvJf upx.tar.xz --strip-components 1 > /dev/null && ./upx taosadapter || :
+ COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
+ COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
+ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
+ COMMAND cmake -E copy ./taosadapter.service ${CMAKE_BINARY_DIR}/test/cfg/
)
ENDIF ()
diff --git a/src/plugins/blm3 b/src/plugins/blm3
deleted file mode 160000
index f56aa0f485d7bb6aebbcefc2007eeecdccb767c8..0000000000000000000000000000000000000000
--- a/src/plugins/blm3
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f56aa0f485d7bb6aebbcefc2007eeecdccb767c8
diff --git a/src/plugins/http/inc/httpInt.h b/src/plugins/http/inc/httpInt.h
index 6c567e23bc817957d7f376ef101f8e5ca88559e6..bf8efd2831e19480615be291d409d53207bb8f63 100644
--- a/src/plugins/http/inc/httpInt.h
+++ b/src/plugins/http/inc/httpInt.h
@@ -140,28 +140,29 @@ typedef enum {
} EHTTP_CONTEXT_FAILED_CAUSE;
typedef struct HttpContext {
- int32_t refCount;
- SOCKET fd;
- uint32_t accessTimes;
- uint32_t lastAccessTime;
- int32_t state;
- uint8_t reqType;
- uint8_t parsed;
- uint8_t error;
- char ipstr[22];
- char user[TSDB_USER_LEN]; // parsed from auth token or login message
- char pass[HTTP_PASSWORD_LEN];
- char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
- TAOS * taos;
- void * ppContext;
- HttpSession *session;
- z_stream gzipStream;
- HttpParser *parser;
- HttpSqlCmd singleCmd;
- HttpSqlCmds *multiCmds;
- JsonBuf * jsonBuf;
- HttpEncodeMethod *encodeMethod;
- HttpDecodeMethod *decodeMethod;
+ int32_t refCount;
+ SOCKET fd;
+ uint32_t accessTimes;
+ uint32_t lastAccessTime;
+ int32_t state;
+ uint8_t reqType;
+ uint8_t parsed;
+ uint8_t error;
+ char ipstr[22];
+ char user[TSDB_USER_LEN]; // parsed from auth token or login message
+ char pass[HTTP_PASSWORD_LEN];
+ char db[/*TSDB_ACCT_ID_LEN + */TSDB_DB_NAME_LEN];
+ TAOS * taos;
+ void * ppContext;
+ pthread_mutex_t ctxMutex;
+ HttpSession *session;
+ z_stream gzipStream;
+ HttpParser *parser;
+ HttpSqlCmd singleCmd;
+ HttpSqlCmds *multiCmds;
+ JsonBuf *jsonBuf;
+ HttpEncodeMethod *encodeMethod;
+ HttpDecodeMethod *decodeMethod;
struct HttpThread *pThread;
} HttpContext;
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index 11945453c56ab7fdd1fc8b0c4f2510bbbdda1a6e..f26a4b4c8bdda05f801075b70c1b762882adfd27 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -67,6 +67,8 @@ static void httpDestroyContext(void *data) {
pContext->parser = NULL;
}
+ pthread_mutex_destroy(&pContext->ctxMutex);
+
tfree(pContext);
}
@@ -121,13 +123,15 @@ HttpContext *httpCreateContext(SOCKET fd) {
TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE)pContext;
HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &pContext,
- sizeof(TSDB_CACHE_PTR_TYPE), 3000);
+ sizeof(TSDB_CACHE_PTR_TYPE), tsHttpKeepAlive);
pContext->ppContext = ppContext;
httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
// set the ref to 0
taosCacheRelease(tsHttpServer.contextCache, (void **)&ppContext, false);
+ pthread_mutex_init(&pContext->ctxMutex, NULL);
+
return pContext;
}
diff --git a/src/plugins/http/src/httpHandle.c b/src/plugins/http/src/httpHandle.c
index 9719d93824b50064ec1cf23677c641428434592c..6f77994593ebbfc1dc2d9ce97b15a90a797dd8d5 100644
--- a/src/plugins/http/src/httpHandle.c
+++ b/src/plugins/http/src/httpHandle.c
@@ -45,15 +45,14 @@ bool httpProcessData(HttpContext* pContext) {
httpTrace("context:%p, fd:%d, process options request", pContext, pContext->fd);
httpSendOptionResp(pContext, "process options request success");
} else {
- if (!httpDecodeRequest(pContext)) {
- /*
- * httpCloseContextByApp has been called when parsing the error
- */
- // httpCloseContextByApp(pContext);
- } else {
+ pthread_mutex_lock(&pContext->ctxMutex);
+
+ if (httpDecodeRequest(pContext)) {
httpClearParser(pContext->parser);
httpProcessRequest(pContext);
}
+
+ pthread_mutex_unlock(&pContext->ctxMutex);
}
return true;
diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c
index 602767a6563b3ca3430501c0dbcee65333f1d44b..e1b3b17347cba786fa12f5cc2fa7ab3cfb45bd54 100644
--- a/src/plugins/http/src/httpSql.c
+++ b/src/plugins/http/src/httpSql.c
@@ -406,7 +406,14 @@ void httpProcessRequestCb(void *param, TAOS_RES *result, int32_t code) {
if (pContext->session == NULL) {
httpSendErrorResp(pContext, TSDB_CODE_HTTP_SESSION_FULL);
} else {
+    // httpProcessRequestCb may run on one thread while a subsequent thread
+    // re-enters this function; if the memset performed on behalf of
+    // httpProcessRequestCb executes just before the other thread runs a
+    // *Cmd function, nativSql would be NULL — serialize with ctxMutex
+ pthread_mutex_lock(&pContext->ctxMutex);
+
httpExecCmd(pContext);
+
+ pthread_mutex_unlock(&pContext->ctxMutex);
}
}
diff --git a/src/plugins/taosadapter b/src/plugins/taosadapter
new file mode 160000
index 0000000000000000000000000000000000000000..6397bf5963f62f0aa5c4b9b961b16ed5c62579f1
--- /dev/null
+++ b/src/plugins/taosadapter
@@ -0,0 +1 @@
+Subproject commit 6397bf5963f62f0aa5c4b9b961b16ed5c62579f1
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index 1bbc8a363ba3af678b42530db48eebb92deece48..ed54723adeafdcd3cdff8b438d2f823a73a04a33 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -43,6 +43,8 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows)
+#define RESET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:(((_r)->outputBuf)->info.rows = 0))
+
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData? 1 : 0)
enum {
@@ -229,6 +231,7 @@ typedef struct SQueryAttr {
bool stateWindow; // window State on sub/normal table
bool createFilterOperator; // if filter operator is needed
bool multigroupResult; // multigroup result can exist in one SSDataBlock
+ bool needSort; // need sort rowRes
int32_t interBufSize; // intermediate buffer sizse
int32_t havingNum; // having expr number
@@ -352,16 +355,16 @@ enum OPERATOR_TYPE_E {
typedef struct SOperatorInfo {
uint8_t operatorType;
- bool blockingOptr; // block operator or not
- uint8_t status; // denote if current operator is completed
- int32_t numOfOutput; // number of columns of the current operator results
- char *name; // name, used to show the query execution plan
- void *info; // extension attribution
+ bool blockingOptr; // block operator or not
+ uint8_t status; // denote if current operator is completed
+ int32_t numOfOutput; // number of columns of the current operator results
+ char *name; // name, used to show the query execution plan
+ void *info; // extension attribution
SExprInfo *pExpr;
SQueryRuntimeEnv *pRuntimeEnv;
- struct SOperatorInfo **upstream; // upstream pointer list
- int32_t numOfUpstream; // number of upstream. The value is always ONE expect for join operator
+ struct SOperatorInfo **upstream; // upstream pointer list
+ int32_t numOfUpstream; // number of upstream. The value is always ONE expect for join operator
__operator_fn_t exec;
__optr_cleanup_fn_t cleanup;
} SOperatorInfo;
@@ -392,6 +395,7 @@ typedef struct SQInfo {
int32_t dataReady; // denote if query result is ready or not
void* rspContext; // response context
int64_t startExecTs; // start to exec timestamp
+ int64_t lastRetrieveTs; // last retrieve timestamp
char* sql; // query sql string
SQueryCostInfo summary;
} SQInfo;
diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h
index c231c90996e00d84a70c7141eac69c5a59e20254..0ddaabc5fb9bf6eb2c3a16eeedb3b6d952a1f666 100644
--- a/src/query/inc/qSqlparser.h
+++ b/src/query/inc/qSqlparser.h
@@ -281,7 +281,7 @@ void *destroyRelationInfo(SRelationInfo* pFromInfo);
SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrToken* pAlias);
// sql expr leaf node
-tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType);
+tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optrType);
tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType);
SArray *tStrTokenAppend(SArray *pList, SStrToken *pToken);
diff --git a/src/query/inc/qTableMeta.h b/src/query/inc/qTableMeta.h
index 3fb489f17ed6dd76a6c18c1cdce288c39d0594a7..c1c16267da734ac40cf27276216896e384e294f3 100644
--- a/src/query/inc/qTableMeta.h
+++ b/src/query/inc/qTableMeta.h
@@ -152,7 +152,8 @@ typedef struct SQueryInfo {
struct SQueryInfo *pDownstream;
int32_t havingFieldNum;
bool stableQuery;
- bool groupbyColumn;
+ bool groupbyColumn;
+ bool groupbyTag;
bool simpleAgg;
bool arithmeticOnAgg;
bool projectionQuery;
diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y
index a7f6f0dd68f5398ec1e2c5a9c3580aca6f52f6ba..44898b3c59156c2e4cf9749de47ffa39fa6b53b9 100644
--- a/src/query/inc/sql.y
+++ b/src/query/inc/sql.y
@@ -90,7 +90,6 @@ dbPrefix(A) ::= ids(X) DOT. {A = X; }
%type cpxName {SStrToken}
cpxName(A) ::= . {A.n = 0; }
cpxName(A) ::= DOT ids(Y). {A = Y; A.n += 1; }
-
cmd ::= SHOW CREATE TABLE ids(X) cpxName(Y). {
X.n += Y.n;
setDCLSqlElems(pInfo, TSDB_SQL_SHOW_CREATE_TABLE, 1, &X);
@@ -520,7 +519,7 @@ selcollist(A) ::= sclp(P) distinct(Z) expr(X) as(Y). {
}
selcollist(A) ::= sclp(P) STAR. {
- tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL);
+ tSqlExpr *pNode = tSqlExprCreateIdValue(pInfo, NULL, TK_ALL);
A = tSqlExprListAppend(P, pNode, 0, 0);
}
@@ -701,23 +700,23 @@ where_opt(A) ::= WHERE expr(X). {A = X;}
expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->exprToken.z = X.z; A->exprToken.n = (Z.z - X.z + 1);}
-expr(A) ::= ID(X). { A = tSqlExprCreateIdValue(&X, TK_ID);}
-expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(&X, TK_ID);}
-expr(A) ::= ID(X) DOT STAR(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(&X, TK_ALL);}
-
-expr(A) ::= INTEGER(X). { A = tSqlExprCreateIdValue(&X, TK_INTEGER);}
-expr(A) ::= MINUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprCreateIdValue(&X, TK_INTEGER);}
-expr(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprCreateIdValue(&X, TK_INTEGER);}
-expr(A) ::= FLOAT(X). { A = tSqlExprCreateIdValue(&X, TK_FLOAT);}
-expr(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprCreateIdValue(&X, TK_FLOAT);}
-expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprCreateIdValue(&X, TK_FLOAT);}
-expr(A) ::= STRING(X). { A = tSqlExprCreateIdValue(&X, TK_STRING);}
-expr(A) ::= NOW(X). { A = tSqlExprCreateIdValue(&X, TK_NOW); }
-expr(A) ::= VARIABLE(X). { A = tSqlExprCreateIdValue(&X, TK_VARIABLE);}
-expr(A) ::= PLUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(&X, TK_VARIABLE);}
-expr(A) ::= MINUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(&X, TK_VARIABLE);}
-expr(A) ::= BOOL(X). { A = tSqlExprCreateIdValue(&X, TK_BOOL);}
-expr(A) ::= NULL(X). { A = tSqlExprCreateIdValue(&X, TK_NULL);}
+expr(A) ::= ID(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_ID);}
+expr(A) ::= ID(X) DOT ID(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(pInfo, &X, TK_ID);}
+expr(A) ::= ID(X) DOT STAR(Y). { X.n += (1+Y.n); A = tSqlExprCreateIdValue(pInfo, &X, TK_ALL);}
+
+expr(A) ::= INTEGER(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_INTEGER);}
+expr(A) ::= MINUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprCreateIdValue(pInfo, &X, TK_INTEGER);}
+expr(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprCreateIdValue(pInfo, &X, TK_INTEGER);}
+expr(A) ::= FLOAT(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_FLOAT);}
+expr(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprCreateIdValue(pInfo, &X, TK_FLOAT);}
+expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSqlExprCreateIdValue(pInfo, &X, TK_FLOAT);}
+expr(A) ::= STRING(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_STRING);}
+expr(A) ::= NOW(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_NOW); }
+expr(A) ::= VARIABLE(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_VARIABLE);}
+expr(A) ::= PLUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(pInfo, &X, TK_VARIABLE);}
+expr(A) ::= MINUS(X) VARIABLE(Y). { X.n += Y.n; X.type = TK_VARIABLE; A = tSqlExprCreateIdValue(pInfo, &X, TK_VARIABLE);}
+expr(A) ::= BOOL(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_BOOL);}
+expr(A) ::= NULL(X). { A = tSqlExprCreateIdValue(pInfo, &X, TK_NULL);}
// ordinary functions: min(x), max(x), top(k, 20)
expr(A) ::= ID(X) LP exprlist(Y) RP(E). { tStrTokenAppend(pInfo->funcs, &X); A = tSqlExprCreateFunction(Y, &X, &E, X.type); }
@@ -922,4 +921,4 @@ cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); s
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
LIKE MATCH NMATCH KEY OF OFFSET RAISE REPLACE RESTRICT ROW STATEMENT TRIGGER VIEW ALL
- NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES.
+ NOW IPTOKEN SEMI NONE PREV LINEAR IMPORT TBNAME JOIN STABLE NULL INSERT INTO VALUES FILE.
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index cef76bb6cc69a3c7781da948a4ef289602eb5aec..b0c601c5d86888b8c55d441315632b282e28fe25 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -3178,7 +3178,14 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
default:
qError("error input type");
}
-
+ if (notNullElems > 0) {
+ for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) {
+ SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t];
+ if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) {
+ aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx);
+ }
+ }
+ }
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
}
@@ -3353,6 +3360,12 @@ static void diff_function(SQLFunctionCtx *pCtx) {
*/
assert(pCtx->hasNull);
} else {
+ for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) {
+ SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t];
+ if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) {
+ aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx);
+ }
+ }
int32_t forwardStep = (isFirstBlock) ? notNullElems - 1 : notNullElems;
GET_RES_INFO(pCtx)->numOfRes += forwardStep;
@@ -4727,8 +4740,6 @@ static void csum_function(SQLFunctionCtx *pCtx) {
TSKEY* pTimestamp = pCtx->ptsOutputBuf;
TSKEY* tsList = GET_TS_LIST(pCtx);
- qDebug("%p csum_function() size:%d, hasNull:%d", pCtx, pCtx->size, pCtx->hasNull);
-
for (; i < pCtx->size && i >= 0; i += step) {
char* pData = GET_INPUT_DATA(pCtx, i);
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
@@ -4770,6 +4781,12 @@ static void csum_function(SQLFunctionCtx *pCtx) {
if (notNullElems == 0) {
assert(pCtx->hasNull);
} else {
+ for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) {
+ SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t];
+ if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) {
+ aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx);
+ }
+ }
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
}
@@ -4843,6 +4860,12 @@ static void mavg_function(SQLFunctionCtx *pCtx) {
if (notNullElems <= 0) {
assert(pCtx->hasNull);
} else {
+ for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) {
+ SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t];
+ if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) {
+ aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx);
+ }
+ }
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
GET_RES_INFO(pCtx)->hasResult = DATA_SET_FLAG;
}
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 9457483714310c9ada01148345927f44718a28e8..0382dd4ee6e7d42d6a1e6060de76795b05a0b5a5 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -1313,9 +1313,6 @@ static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
if (pCtx[k].currentStage == MERGE_STAGE) {
pCtx[k].order = TSDB_ORDER_ASC;
}
-
- pCtx[k].startTs = pQueryAttr->window.skey;
-
if (pCtx[k].functionId < 0) {
// load the script and exec
SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo;
@@ -2451,7 +2448,7 @@ bool isQueryKilled(SQInfo *pQInfo) {
// query has been executed more than tsShellActivityTimer, and the retrieve has not arrived
// abort current query execution.
- if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs/1000) > getMaximumIdleDurationSec()) &&
+ if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->lastRetrieveTs/1000) > getMaximumIdleDurationSec()) &&
(!needBuildResAfterQueryComplete(pQInfo))) {
assert(pQInfo->startExecTs != 0);
@@ -4234,7 +4231,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t start = 0;
int32_t step = -1;
- qDebug("QInfo:0x%"PRIx64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv));
+ qDebug("QInfo:0x%"PRIx64" start to copy data from resultrowInfo to output buf", GET_QID(pRuntimeEnv));
assert(orderType == TSDB_ORDER_ASC || orderType == TSDB_ORDER_DESC);
if (orderType == TSDB_ORDER_ASC) {
@@ -4350,31 +4347,16 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data
compSizes = tcalloc(numOfCols, sizeof(int32_t));
}
- if (pQueryAttr->pExpr2 == NULL) {
- for (int32_t col = 0; col < numOfCols; ++col) {
- SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
- if (compressed) {
- compSizes[col] = compressQueryColData(pColRes, pRes->info.rows, data, compressed);
- data += compSizes[col];
- *compLen += compSizes[col];
- compSizes[col] = htonl(compSizes[col]);
- } else {
- memmove(data, pColRes->pData, pColRes->info.bytes * pRes->info.rows);
- data += pColRes->info.bytes * pRes->info.rows;
- }
- }
- } else {
- for (int32_t col = 0; col < numOfCols; ++col) {
- SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
- if (compressed) {
- compSizes[col] = htonl(compressQueryColData(pColRes, numOfRows, data, compressed));
- data += compSizes[col];
- *compLen += compSizes[col];
- compSizes[col] = htonl(compSizes[col]);
- } else {
- memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows);
- data += pColRes->info.bytes * numOfRows;
- }
+ for (int32_t col = 0; col < numOfCols; ++col) {
+ SColumnInfoData* pColRes = taosArrayGet(pRes->pDataBlock, col);
+ if (compressed) {
+ compSizes[col] = compressQueryColData(pColRes, numOfRows, data, compressed);
+ data += compSizes[col];
+ *compLen += compSizes[col];
+ compSizes[col] = htonl(compSizes[col]);
+ } else {
+ memmove(data, pColRes->pData, pColRes->info.bytes * numOfRows);
+ data += pColRes->info.bytes * numOfRows;
}
}
@@ -5238,7 +5220,6 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv*
pInfo->reverseTimes = 0;
pInfo->order = pRuntimeEnv->pQueryAttr->order.order;
pInfo->current = 0;
-// pInfo->prevGroupId = -1;
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "TableScanOperator";
@@ -5991,6 +5972,18 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
return NULL;
}
+static int32_t resRowCompare(const void *r1, const void *r2) {
+ SResultRow *res1 = *(SResultRow **)r1;
+ SResultRow *res2 = *(SResultRow **)r2;
+
+ if (res1->win.skey == res2->win.skey) {
+ return 0;
+ } else {
+ return res1->win.skey > res2->win.skey ? 1 : -1;
+ }
+}
+
+
static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
@@ -6036,6 +6029,10 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
pQueryAttr->window = win;
pOperator->status = OP_RES_TO_RETURN;
+ if (pIntervalInfo->resultRowInfo.size > 0 && pQueryAttr->needSort) {
+ qsort(pIntervalInfo->resultRowInfo.pResult, pIntervalInfo->resultRowInfo.size, POINTER_BYTES, resRowCompare);
+ }
+
closeAllResultRows(&pIntervalInfo->resultRowInfo);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
finalizeQueryResult(pOperator, pIntervalInfo->pCtx, &pIntervalInfo->resultRowInfo, pIntervalInfo->rowCellInfoOffset);
diff --git a/src/query/src/qFilter.c b/src/query/src/qFilter.c
index 7b3d5e6835ec88e63ee72bc2b996385036897b19..ac18538e21864dc1ae0d2c028c2f014f93856782 100644
--- a/src/query/src/qFilter.c
+++ b/src/query/src/qFilter.c
@@ -1841,6 +1841,15 @@ int32_t filterInitValFieldData(SFilterInfo *info) {
qError("dump value to type[%d] failed", type);
return TSDB_CODE_TSC_INVALID_OPERATION;
}
+
+ // match/nmatch for nchar type need convert from ucs4 to mbs
+ if(type == TSDB_DATA_TYPE_NCHAR &&
+ (unit->compare.optr == TSDB_RELATION_MATCH || unit->compare.optr == TSDB_RELATION_NMATCH)){
+ char newValData[TSDB_REGEX_STRING_DEFAULT_LEN * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE] = {0};
+ int32_t len = taosUcs4ToMbs(varDataVal(fi->data), varDataLen(fi->data), varDataVal(newValData));
+ varDataSetLen(newValData, len);
+ varDataCopy(fi->data, newValData);
+ }
}
return TSDB_CODE_SUCCESS;
@@ -2960,9 +2969,18 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SDataStat
all = false;
continue;
}
+ // match/nmatch for nchar type need convert from ucs4 to mbs
+
+ if(info->cunits[uidx].dataType == TSDB_DATA_TYPE_NCHAR && (info->cunits[uidx].optr == TSDB_RELATION_MATCH || info->cunits[uidx].optr == TSDB_RELATION_NMATCH)){
+ char *newColData = calloc(info->cunits[uidx].dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
+ int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
+ varDataSetLen(newColData, len);
+ (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData);
+ tfree(newColData);
+ }else{
+ (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
+ }
- (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
-
if ((*p)[i] == 0) {
all = false;
}
@@ -3009,7 +3027,15 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SDataStatis *
} else if (cunit->rfunc >= 0) {
(*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
} else {
- (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
+ if(cunit->dataType == TSDB_DATA_TYPE_NCHAR && (cunit->optr == TSDB_RELATION_MATCH || cunit->optr == TSDB_RELATION_NMATCH)){
+ char *newColData = calloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
+ int len = taosUcs4ToMbs(varDataVal(colData), varDataLen(colData), varDataVal(newColData));
+ varDataSetLen(newColData, len);
+ (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData);
+ tfree(newColData);
+ }else{
+ (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
+ }
}
//FILTER_UNIT_SET_R(info, uidx, p[i]);
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index d156230efbc75c46205637747bb58f86d13763fe..07ff79b16155eba158e2cffc24be7dbe1c3d098f 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -67,7 +67,6 @@ SSqlInfo qSqlParse(const char *pStr) {
sqlInfo.valid = false;
goto abort_parse;
}
-
default:
Parse(pParser, t0.type, t0, &sqlInfo);
if (sqlInfo.valid == false) {
@@ -134,7 +133,7 @@ SArray *tStrTokenAppend(SArray *pList, SStrToken *pToken) {
return pList;
}
-tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
+tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optrType) {
tSqlExpr *pSqlExpr = calloc(1, sizeof(tSqlExpr));
if (pToken != NULL) {
@@ -169,6 +168,7 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
char unit = 0;
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, &unit, TSDB_TIME_PRECISION_NANO);
if (ret != TSDB_CODE_SUCCESS) {
+ snprintf(pInfo->msg, tListLen(pInfo->msg), "%s", pToken->z);
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
}
diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c
index 3b5f6a9d439f827da66cf829050b4e1d4440d69d..a150f3a717afaa0ddd79a33a9c8be5285c327574 100644
--- a/src/query/src/qUtil.c
+++ b/src/query/src/qUtil.c
@@ -448,7 +448,9 @@ int32_t tsDescOrder(const void* p1, const void* p2) {
}
}
-void orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) {
+void
+
+orderTheResultRows(SQueryRuntimeEnv* pRuntimeEnv) {
__compar_fn_t fn = NULL;
if (pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_ASC) {
fn = tsAscOrder;
diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c
index c6e6eddce7d8f56095d5d78f4d1f84ed1d4f3c97..fce7f649892f87d075c8dd64e4d1160e5d05bf77 100644
--- a/src/query/src/queryMain.c
+++ b/src/query/src/queryMain.c
@@ -272,8 +272,10 @@ bool qTableQuery(qinfo_t qinfo, uint64_t *qId) {
}
*qId = pQInfo->qId;
- if(pQInfo->startExecTs == 0)
+ if(pQInfo->startExecTs == 0) {
pQInfo->startExecTs = taosGetTimestampMs();
+ pQInfo->lastRetrieveTs = pQInfo->startExecTs;
+ }
if (isQueryKilled(pQInfo)) {
qDebug("QInfo:0x%"PRIx64" it is already killed, abort", pQInfo->qId);
@@ -412,6 +414,9 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
setQueryStatus(pRuntimeEnv, QUERY_OVER);
}
+ RESET_NUM_OF_RESULTS(&(pQInfo->runtimeEnv));
+ pQInfo->lastRetrieveTs = taosGetTimestampMs();
+
if ((*pRsp)->compressed && compLen != 0) {
int32_t numOfCols = pQueryAttr->pExpr2 ? pQueryAttr->numOfExpr2 : pQueryAttr->numOfOutput;
int32_t origSize = pQueryAttr->resultRowSize * s;
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index 6eb2c036b2bc19ff27e5bdd3e5ecd4a54e166b1b..4232076fe682fc50ba50eeba937e5e5398ca20c2 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -23,7 +23,6 @@
** input grammar file:
*/
#include
-#include
/************ Begin %include sections from the grammar ************************/
#include
@@ -77,10 +76,8 @@
** zero the stack is dynamically sized using realloc()
** ParseARG_SDECL A static variable declaration for the %extra_argument
** ParseARG_PDECL A parameter declaration for the %extra_argument
-** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter
** ParseARG_STORE Code to store %extra_argument into yypParser
** ParseARG_FETCH Code to extract %extra_argument from yypParser
-** ParseCTX_* As ParseARG_ except for %extra_context
** YYERRORSYMBOL is the code number of the error symbol. If not
** defined, then do no error processing.
** YYNSTATE the combined number of states.
@@ -100,48 +97,41 @@
#endif
/************* Begin control #defines *****************************************/
#define YYCODETYPE unsigned short int
-#define YYNOCODE 278
+#define YYNOCODE 281
#define YYACTIONTYPE unsigned short int
#define ParseTOKENTYPE SStrToken
typedef union {
int yyinit;
ParseTOKENTYPE yy0;
- SRelationInfo* yy8;
- SWindowStateVal yy40;
- SSqlNode* yy56;
- SCreateDbInfo yy90;
- int yy96;
- int32_t yy104;
- SSessionWindowVal yy147;
- SCreatedTableInfo yy152;
- SLimitVal yy166;
- SCreateAcctInfo yy171;
- TAOS_FIELD yy183;
- int64_t yy325;
- SIntervalVal yy400;
- SArray* yy421;
- tVariant yy430;
- SCreateTableSql* yy438;
- tSqlExpr* yy439;
+ SWindowStateVal yy48;
+ SCreateTableSql* yy102;
+ tVariant yy106;
+ int64_t yy109;
+ SSessionWindowVal yy139;
+ SCreateDbInfo yy142;
+ tSqlExpr* yy146;
+ SRelationInfo* yy164;
+ int yy172;
+ SArray* yy221;
+ SIntervalVal yy280;
+ int32_t yy340;
+ SSqlNode* yy376;
+ SCreatedTableInfo yy416;
+ SLimitVal yy454;
+ SCreateAcctInfo yy491;
+ TAOS_FIELD yy503;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
#endif
#define ParseARG_SDECL SSqlInfo* pInfo;
#define ParseARG_PDECL ,SSqlInfo* pInfo
-#define ParseARG_PARAM ,pInfo
-#define ParseARG_FETCH SSqlInfo* pInfo=yypParser->pInfo;
-#define ParseARG_STORE yypParser->pInfo=pInfo;
-#define ParseCTX_SDECL
-#define ParseCTX_PDECL
-#define ParseCTX_PARAM
-#define ParseCTX_FETCH
-#define ParseCTX_STORE
+#define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo
+#define ParseARG_STORE yypParser->pInfo = pInfo
#define YYFALLBACK 1
#define YYNSTATE 368
#define YYNRULE 294
-#define YYNRULE_WITH_ACTION 294
-#define YYNTOKEN 197
+#define YYNTOKEN 198
#define YY_MAX_SHIFT 367
#define YY_MIN_SHIFTREDUCE 576
#define YY_MAX_SHIFTREDUCE 869
@@ -151,7 +141,6 @@ typedef union {
#define YY_MIN_REDUCE 873
#define YY_MAX_REDUCE 1166
/************* End control #defines *******************************************/
-#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
/* Define the yytestcase() macro to be a no-op if is not already defined
** otherwise.
@@ -223,246 +212,247 @@ static const YYACTIONTYPE yy_action[] = {
/* 20 */ 255, 53, 52, 51, 628, 62, 324, 67, 65, 68,
/* 30 */ 66, 157, 629, 286, 238, 58, 57, 344, 343, 56,
/* 40 */ 55, 54, 59, 60, 247, 63, 64, 252, 1029, 255,
- /* 50 */ 53, 52, 51, 209, 62, 324, 67, 65, 68, 66,
- /* 60 */ 999, 1042, 997, 998, 58, 57, 664, 1000, 56, 55,
+ /* 50 */ 53, 52, 51, 664, 62, 324, 67, 65, 68, 66,
+ /* 60 */ 999, 1042, 997, 998, 58, 57, 209, 1000, 56, 55,
/* 70 */ 54, 1001, 1048, 1002, 1003, 58, 57, 277, 1015, 56,
- /* 80 */ 55, 54, 59, 60, 164, 63, 64, 38, 82, 255,
+ /* 80 */ 55, 54, 59, 60, 215, 63, 64, 38, 82, 255,
/* 90 */ 53, 52, 51, 88, 62, 324, 67, 65, 68, 66,
/* 100 */ 284, 283, 249, 752, 58, 57, 1029, 211, 56, 55,
- /* 110 */ 54, 38, 59, 61, 806, 63, 64, 1042, 1143, 255,
+ /* 110 */ 54, 322, 59, 61, 806, 63, 64, 1042, 1143, 255,
/* 120 */ 53, 52, 51, 628, 62, 324, 67, 65, 68, 66,
/* 130 */ 45, 629, 237, 239, 58, 57, 1026, 164, 56, 55,
/* 140 */ 54, 60, 1023, 63, 64, 771, 772, 255, 53, 52,
- /* 150 */ 51, 95, 62, 324, 67, 65, 68, 66, 38, 1090,
- /* 160 */ 1025, 296, 58, 57, 322, 83, 56, 55, 54, 577,
+ /* 150 */ 51, 628, 62, 324, 67, 65, 68, 66, 812, 629,
+ /* 160 */ 815, 216, 58, 57, 322, 100, 56, 55, 54, 577,
/* 170 */ 578, 579, 580, 581, 582, 583, 584, 585, 586, 587,
- /* 180 */ 588, 589, 590, 155, 322, 236, 63, 64, 756, 248,
- /* 190 */ 255, 53, 52, 51, 628, 62, 324, 67, 65, 68,
- /* 200 */ 66, 251, 629, 245, 354, 58, 57, 1026, 215, 56,
+ /* 180 */ 588, 589, 590, 155, 164, 236, 63, 64, 756, 248,
+ /* 190 */ 255, 53, 52, 51, 269, 62, 324, 67, 65, 68,
+ /* 200 */ 66, 1017, 354, 273, 272, 58, 57, 251, 217, 56,
/* 210 */ 55, 54, 1089, 44, 320, 361, 360, 319, 318, 317,
- /* 220 */ 359, 316, 315, 314, 358, 313, 357, 356, 808, 38,
- /* 230 */ 1, 180, 24, 991, 979, 980, 981, 982, 983, 984,
- /* 240 */ 985, 986, 987, 988, 989, 990, 992, 993, 256, 214,
- /* 250 */ 38, 254, 821, 922, 100, 810, 222, 813, 164, 816,
- /* 260 */ 192, 211, 139, 138, 137, 221, 809, 254, 821, 329,
- /* 270 */ 88, 810, 1143, 813, 246, 816, 1028, 29, 1026, 67,
- /* 280 */ 65, 68, 66, 38, 1162, 233, 234, 58, 57, 325,
- /* 290 */ 1017, 56, 55, 54, 38, 333, 56, 55, 54, 1026,
- /* 300 */ 269, 233, 234, 258, 5, 41, 182, 45, 211, 273,
- /* 310 */ 272, 181, 106, 111, 102, 110, 164, 73, 736, 1143,
- /* 320 */ 932, 733, 812, 734, 815, 735, 263, 192, 334, 276,
- /* 330 */ 309, 80, 1026, 94, 69, 123, 117, 128, 229, 335,
- /* 340 */ 362, 960, 127, 1026, 133, 136, 126, 202, 200, 198,
- /* 350 */ 69, 260, 261, 130, 197, 143, 142, 141, 140, 74,
- /* 360 */ 44, 97, 361, 360, 788, 923, 38, 359, 38, 822,
- /* 370 */ 817, 358, 192, 357, 356, 38, 818, 38, 38, 259,
- /* 380 */ 811, 257, 814, 332, 331, 822, 817, 264, 125, 298,
- /* 390 */ 264, 93, 818, 326, 1012, 1013, 35, 1016, 178, 14,
- /* 400 */ 354, 179, 265, 96, 262, 264, 339, 338, 154, 152,
- /* 410 */ 151, 336, 749, 340, 81, 1026, 1027, 1026, 3, 193,
- /* 420 */ 341, 787, 342, 346, 1026, 278, 1026, 1026, 365, 364,
- /* 430 */ 148, 85, 86, 99, 76, 737, 738, 768, 9, 39,
- /* 440 */ 778, 779, 722, 819, 301, 724, 216, 303, 1014, 723,
- /* 450 */ 34, 159, 844, 823, 70, 26, 39, 253, 39, 70,
- /* 460 */ 79, 98, 627, 70, 135, 134, 25, 25, 280, 280,
- /* 470 */ 16, 116, 15, 115, 77, 18, 25, 17, 741, 6,
- /* 480 */ 742, 274, 739, 304, 740, 20, 122, 19, 121, 22,
- /* 490 */ 217, 21, 711, 1100, 1137, 1136, 1135, 825, 231, 156,
- /* 500 */ 232, 820, 212, 213, 218, 210, 1099, 219, 220, 224,
- /* 510 */ 225, 226, 223, 207, 1154, 243, 1096, 1095, 244, 345,
- /* 520 */ 1050, 1061, 1043, 48, 1058, 1059, 1063, 153, 281, 158,
- /* 530 */ 163, 292, 1024, 175, 1082, 174, 1081, 279, 84, 285,
- /* 540 */ 1022, 310, 176, 240, 177, 171, 167, 937, 306, 307,
- /* 550 */ 308, 767, 311, 312, 1040, 165, 166, 46, 287, 289,
- /* 560 */ 297, 299, 205, 168, 42, 78, 75, 50, 323, 931,
- /* 570 */ 330, 1161, 113, 1160, 295, 169, 293, 291, 1157, 183,
- /* 580 */ 337, 1153, 119, 288, 1152, 1149, 184, 957, 43, 40,
- /* 590 */ 47, 206, 919, 129, 49, 917, 131, 132, 915, 914,
- /* 600 */ 266, 195, 196, 911, 910, 909, 908, 907, 906, 905,
- /* 610 */ 199, 201, 902, 900, 898, 896, 203, 893, 204, 889,
- /* 620 */ 355, 124, 89, 290, 1083, 347, 348, 349, 350, 351,
- /* 630 */ 352, 353, 363, 869, 230, 250, 305, 267, 268, 868,
- /* 640 */ 270, 227, 228, 271, 867, 850, 107, 936, 935, 108,
- /* 650 */ 849, 275, 280, 300, 10, 282, 744, 87, 30, 90,
- /* 660 */ 913, 912, 904, 186, 958, 190, 185, 187, 144, 191,
- /* 670 */ 189, 188, 145, 146, 147, 903, 995, 895, 4, 894,
- /* 680 */ 959, 769, 160, 33, 780, 170, 172, 2, 161, 162,
- /* 690 */ 774, 91, 242, 776, 92, 1005, 294, 11, 12, 31,
- /* 700 */ 32, 13, 27, 302, 28, 99, 101, 104, 36, 103,
- /* 710 */ 642, 37, 105, 677, 675, 674, 673, 671, 670, 669,
- /* 720 */ 666, 321, 109, 632, 7, 826, 824, 8, 327, 328,
- /* 730 */ 112, 114, 71, 72, 118, 714, 39, 120, 713, 710,
+ /* 220 */ 359, 316, 315, 314, 358, 313, 357, 356, 38, 1137,
+ /* 230 */ 56, 55, 54, 24, 29, 991, 979, 980, 981, 982,
+ /* 240 */ 983, 984, 985, 986, 987, 988, 989, 990, 992, 993,
+ /* 250 */ 214, 14, 254, 821, 1136, 96, 810, 222, 813, 1090,
+ /* 260 */ 816, 296, 97, 139, 138, 137, 221, 211, 254, 821,
+ /* 270 */ 329, 88, 810, 256, 813, 1135, 816, 1025, 1143, 819,
+ /* 280 */ 67, 65, 68, 66, 326, 99, 233, 234, 58, 57,
+ /* 290 */ 325, 164, 56, 55, 54, 1012, 1013, 35, 1016, 811,
+ /* 300 */ 231, 814, 233, 234, 258, 5, 41, 182, 45, 365,
+ /* 310 */ 364, 148, 181, 106, 111, 102, 110, 164, 263, 736,
+ /* 320 */ 38, 1028, 733, 85, 734, 86, 735, 154, 152, 151,
+ /* 330 */ 276, 309, 80, 211, 38, 69, 123, 117, 128, 229,
+ /* 340 */ 362, 960, 232, 127, 1143, 133, 136, 126, 202, 200,
+ /* 350 */ 198, 69, 260, 261, 130, 197, 143, 142, 141, 140,
+ /* 360 */ 280, 44, 280, 361, 360, 245, 94, 1100, 359, 1026,
+ /* 370 */ 822, 817, 358, 38, 357, 356, 38, 818, 38, 246,
+ /* 380 */ 259, 38, 257, 1026, 332, 331, 822, 817, 825, 38,
+ /* 390 */ 298, 264, 93, 818, 265, 38, 262, 38, 339, 338,
+ /* 400 */ 38, 264, 178, 264, 922, 125, 788, 81, 932, 3,
+ /* 410 */ 193, 192, 179, 749, 1027, 192, 212, 354, 333, 73,
+ /* 420 */ 820, 334, 1026, 335, 923, 1026, 336, 1026, 1, 180,
+ /* 430 */ 1026, 192, 76, 95, 340, 1162, 737, 738, 1026, 9,
+ /* 440 */ 341, 1014, 342, 278, 1026, 346, 1026, 83, 768, 1026,
+ /* 450 */ 778, 779, 722, 808, 301, 724, 303, 39, 253, 723,
+ /* 460 */ 34, 74, 159, 787, 70, 26, 39, 844, 39, 70,
+ /* 470 */ 98, 823, 77, 70, 627, 79, 16, 116, 15, 115,
+ /* 480 */ 6, 25, 18, 213, 17, 25, 274, 741, 25, 742,
+ /* 490 */ 739, 809, 740, 304, 20, 122, 19, 121, 22, 218,
+ /* 500 */ 21, 135, 134, 210, 219, 220, 1154, 711, 156, 1099,
+ /* 510 */ 1050, 224, 225, 226, 223, 207, 243, 1096, 1095, 244,
+ /* 520 */ 345, 48, 1061, 1058, 1059, 1063, 1082, 158, 163, 1043,
+ /* 530 */ 281, 153, 292, 1081, 285, 174, 1024, 175, 1022, 176,
+ /* 540 */ 177, 937, 306, 307, 308, 311, 312, 46, 767, 165,
+ /* 550 */ 205, 42, 1040, 323, 931, 330, 1161, 113, 1160, 75,
+ /* 560 */ 1157, 183, 337, 1153, 240, 119, 78, 287, 289, 1152,
+ /* 570 */ 299, 50, 166, 1149, 184, 297, 957, 167, 43, 40,
+ /* 580 */ 47, 206, 919, 293, 129, 917, 131, 295, 132, 915,
+ /* 590 */ 291, 914, 168, 266, 195, 196, 911, 288, 910, 909,
+ /* 600 */ 908, 907, 906, 905, 199, 201, 902, 900, 898, 896,
+ /* 610 */ 203, 893, 204, 889, 49, 310, 279, 84, 89, 290,
+ /* 620 */ 1083, 355, 348, 124, 347, 349, 350, 230, 351, 250,
+ /* 630 */ 305, 352, 353, 363, 869, 267, 268, 868, 227, 270,
+ /* 640 */ 271, 228, 107, 936, 935, 108, 867, 850, 275, 849,
+ /* 650 */ 913, 280, 300, 912, 10, 87, 282, 744, 144, 187,
+ /* 660 */ 904, 186, 958, 185, 188, 189, 191, 190, 145, 903,
+ /* 670 */ 959, 146, 995, 2, 147, 30, 895, 169, 170, 894,
+ /* 680 */ 171, 172, 4, 33, 1005, 90, 769, 160, 162, 780,
+ /* 690 */ 161, 242, 774, 91, 31, 776, 92, 294, 11, 32,
+ /* 700 */ 12, 13, 27, 28, 302, 101, 99, 642, 104, 36,
+ /* 710 */ 103, 675, 37, 677, 105, 674, 673, 671, 670, 669,
+ /* 720 */ 666, 632, 321, 109, 7, 327, 328, 824, 8, 112,
+ /* 730 */ 826, 114, 71, 72, 118, 714, 39, 713, 710, 120,
/* 740 */ 658, 656, 648, 654, 650, 652, 646, 644, 680, 679,
/* 750 */ 678, 676, 672, 668, 667, 194, 630, 594, 873, 872,
/* 760 */ 872, 872, 872, 872, 872, 872, 872, 872, 872, 872,
/* 770 */ 872, 149, 150,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 266, 1, 199, 200, 199, 266, 245, 5, 266, 9,
- /* 10 */ 249, 197, 198, 13, 14, 253, 16, 17, 247, 277,
+ /* 0 */ 268, 1, 201, 202, 201, 268, 247, 5, 268, 9,
+ /* 10 */ 251, 199, 200, 13, 14, 255, 16, 17, 249, 279,
/* 20 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29,
- /* 30 */ 30, 199, 9, 271, 263, 35, 36, 35, 36, 39,
- /* 40 */ 40, 41, 13, 14, 245, 16, 17, 206, 249, 20,
- /* 50 */ 21, 22, 23, 266, 25, 26, 27, 28, 29, 30,
- /* 60 */ 223, 247, 225, 226, 35, 36, 5, 230, 39, 40,
- /* 70 */ 41, 234, 267, 236, 237, 35, 36, 263, 0, 39,
- /* 80 */ 40, 41, 13, 14, 199, 16, 17, 199, 88, 20,
+ /* 30 */ 30, 201, 9, 273, 265, 35, 36, 35, 36, 39,
+ /* 40 */ 40, 41, 13, 14, 247, 16, 17, 208, 251, 20,
+ /* 50 */ 21, 22, 23, 5, 25, 26, 27, 28, 29, 30,
+ /* 60 */ 225, 249, 227, 228, 35, 36, 268, 232, 39, 40,
+ /* 70 */ 41, 236, 269, 238, 239, 35, 36, 265, 0, 39,
+ /* 80 */ 40, 41, 13, 14, 268, 16, 17, 201, 88, 20,
/* 90 */ 21, 22, 23, 84, 25, 26, 27, 28, 29, 30,
- /* 100 */ 268, 269, 245, 39, 35, 36, 249, 266, 39, 40,
- /* 110 */ 41, 199, 13, 14, 85, 16, 17, 247, 277, 20,
+ /* 100 */ 270, 271, 247, 39, 35, 36, 251, 268, 39, 40,
+ /* 110 */ 41, 86, 13, 14, 85, 16, 17, 249, 279, 20,
/* 120 */ 21, 22, 23, 1, 25, 26, 27, 28, 29, 30,
- /* 130 */ 121, 9, 244, 263, 35, 36, 248, 199, 39, 40,
- /* 140 */ 41, 14, 199, 16, 17, 127, 128, 20, 21, 22,
- /* 150 */ 23, 250, 25, 26, 27, 28, 29, 30, 199, 274,
- /* 160 */ 248, 276, 35, 36, 86, 264, 39, 40, 41, 47,
+ /* 130 */ 121, 9, 246, 265, 35, 36, 250, 201, 39, 40,
+ /* 140 */ 41, 14, 201, 16, 17, 127, 128, 20, 21, 22,
+ /* 150 */ 23, 1, 25, 26, 27, 28, 29, 30, 5, 9,
+ /* 160 */ 7, 268, 35, 36, 86, 209, 39, 40, 41, 47,
/* 170 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 180 */ 58, 59, 60, 61, 86, 63, 16, 17, 124, 246,
- /* 190 */ 20, 21, 22, 23, 1, 25, 26, 27, 28, 29,
- /* 200 */ 30, 206, 9, 244, 92, 35, 36, 248, 266, 39,
- /* 210 */ 40, 41, 274, 100, 101, 102, 103, 104, 105, 106,
- /* 220 */ 107, 108, 109, 110, 111, 112, 113, 114, 1, 199,
- /* 230 */ 208, 209, 46, 223, 224, 225, 226, 227, 228, 229,
- /* 240 */ 230, 231, 232, 233, 234, 235, 236, 237, 206, 63,
- /* 250 */ 199, 1, 2, 205, 207, 5, 70, 7, 199, 9,
- /* 260 */ 212, 266, 76, 77, 78, 79, 39, 1, 2, 83,
- /* 270 */ 84, 5, 277, 7, 244, 9, 249, 84, 248, 27,
- /* 280 */ 28, 29, 30, 199, 249, 35, 36, 35, 36, 39,
- /* 290 */ 243, 39, 40, 41, 199, 244, 39, 40, 41, 248,
- /* 300 */ 144, 35, 36, 70, 64, 65, 66, 121, 266, 153,
- /* 310 */ 154, 71, 72, 73, 74, 75, 199, 99, 2, 277,
- /* 320 */ 205, 5, 5, 7, 7, 9, 70, 212, 244, 143,
- /* 330 */ 90, 145, 248, 274, 84, 64, 65, 66, 152, 244,
- /* 340 */ 221, 222, 71, 248, 73, 74, 75, 64, 65, 66,
- /* 350 */ 84, 35, 36, 82, 71, 72, 73, 74, 75, 141,
- /* 360 */ 100, 207, 102, 103, 78, 205, 199, 107, 199, 119,
- /* 370 */ 120, 111, 212, 113, 114, 199, 126, 199, 199, 146,
- /* 380 */ 5, 148, 7, 150, 151, 119, 120, 199, 80, 272,
- /* 390 */ 199, 274, 126, 15, 240, 241, 242, 243, 210, 84,
- /* 400 */ 92, 210, 146, 88, 148, 199, 150, 151, 64, 65,
- /* 410 */ 66, 244, 99, 244, 207, 248, 210, 248, 203, 204,
- /* 420 */ 244, 135, 244, 244, 248, 85, 248, 248, 67, 68,
- /* 430 */ 69, 85, 85, 118, 99, 119, 120, 85, 125, 99,
- /* 440 */ 85, 85, 85, 126, 85, 85, 266, 85, 241, 85,
- /* 450 */ 84, 99, 85, 85, 99, 99, 99, 62, 99, 99,
- /* 460 */ 84, 99, 85, 99, 80, 81, 99, 99, 122, 122,
- /* 470 */ 147, 147, 149, 149, 139, 147, 99, 149, 5, 84,
- /* 480 */ 7, 199, 5, 117, 7, 147, 147, 149, 149, 147,
- /* 490 */ 266, 149, 116, 239, 266, 266, 266, 119, 266, 199,
- /* 500 */ 266, 126, 266, 266, 266, 266, 239, 266, 266, 266,
- /* 510 */ 266, 266, 266, 266, 249, 239, 239, 239, 239, 239,
- /* 520 */ 199, 199, 247, 265, 199, 199, 199, 62, 247, 199,
- /* 530 */ 199, 199, 247, 199, 275, 251, 275, 201, 201, 270,
- /* 540 */ 199, 91, 199, 270, 199, 255, 259, 199, 199, 199,
- /* 550 */ 199, 126, 199, 199, 262, 261, 260, 199, 270, 270,
- /* 560 */ 136, 133, 199, 258, 199, 138, 140, 137, 199, 199,
- /* 570 */ 199, 199, 199, 199, 131, 257, 130, 129, 199, 199,
- /* 580 */ 199, 199, 199, 132, 199, 199, 199, 199, 199, 199,
- /* 590 */ 199, 199, 199, 199, 142, 199, 199, 199, 199, 199,
- /* 600 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- /* 610 */ 199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
- /* 620 */ 115, 98, 201, 201, 201, 97, 53, 94, 96, 57,
- /* 630 */ 95, 93, 86, 5, 201, 201, 201, 155, 5, 5,
- /* 640 */ 155, 201, 201, 5, 5, 102, 207, 211, 211, 207,
- /* 650 */ 101, 144, 122, 117, 84, 99, 85, 123, 84, 99,
- /* 660 */ 201, 201, 201, 218, 220, 216, 219, 214, 202, 213,
- /* 670 */ 215, 217, 202, 202, 202, 201, 238, 201, 203, 201,
- /* 680 */ 222, 85, 84, 252, 85, 256, 254, 208, 84, 99,
- /* 690 */ 85, 84, 1, 85, 84, 238, 84, 134, 134, 99,
- /* 700 */ 99, 84, 84, 117, 84, 118, 80, 72, 89, 88,
- /* 710 */ 5, 89, 88, 9, 5, 5, 5, 5, 5, 5,
- /* 720 */ 5, 15, 80, 87, 84, 119, 85, 84, 26, 61,
- /* 730 */ 149, 149, 16, 16, 149, 5, 99, 149, 5, 85,
+ /* 180 */ 58, 59, 60, 61, 201, 63, 16, 17, 124, 248,
+ /* 190 */ 20, 21, 22, 23, 144, 25, 26, 27, 28, 29,
+ /* 200 */ 30, 245, 92, 153, 154, 35, 36, 208, 268, 39,
+ /* 210 */ 40, 41, 276, 100, 101, 102, 103, 104, 105, 106,
+ /* 220 */ 107, 108, 109, 110, 111, 112, 113, 114, 201, 268,
+ /* 230 */ 39, 40, 41, 46, 84, 225, 226, 227, 228, 229,
+ /* 240 */ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+ /* 250 */ 63, 84, 1, 2, 268, 88, 5, 70, 7, 276,
+ /* 260 */ 9, 278, 209, 76, 77, 78, 79, 268, 1, 2,
+ /* 270 */ 83, 84, 5, 208, 7, 268, 9, 250, 279, 126,
+ /* 280 */ 27, 28, 29, 30, 15, 118, 35, 36, 35, 36,
+ /* 290 */ 39, 201, 39, 40, 41, 242, 243, 244, 245, 5,
+ /* 300 */ 268, 7, 35, 36, 70, 64, 65, 66, 121, 67,
+ /* 310 */ 68, 69, 71, 72, 73, 74, 75, 201, 70, 2,
+ /* 320 */ 201, 251, 5, 85, 7, 85, 9, 64, 65, 66,
+ /* 330 */ 143, 90, 145, 268, 201, 84, 64, 65, 66, 152,
+ /* 340 */ 223, 224, 268, 71, 279, 73, 74, 75, 64, 65,
+ /* 350 */ 66, 84, 35, 36, 82, 71, 72, 73, 74, 75,
+ /* 360 */ 122, 100, 122, 102, 103, 246, 276, 241, 107, 250,
+ /* 370 */ 119, 120, 111, 201, 113, 114, 201, 126, 201, 246,
+ /* 380 */ 146, 201, 148, 250, 150, 151, 119, 120, 119, 201,
+ /* 390 */ 274, 201, 276, 126, 146, 201, 148, 201, 150, 151,
+ /* 400 */ 201, 201, 212, 201, 207, 80, 78, 209, 207, 205,
+ /* 410 */ 206, 214, 212, 99, 212, 214, 268, 92, 246, 99,
+ /* 420 */ 126, 246, 250, 246, 207, 250, 246, 250, 210, 211,
+ /* 430 */ 250, 214, 99, 252, 246, 251, 119, 120, 250, 125,
+ /* 440 */ 246, 243, 246, 85, 250, 246, 250, 266, 85, 250,
+ /* 450 */ 85, 85, 85, 1, 85, 85, 85, 99, 62, 85,
+ /* 460 */ 84, 141, 99, 135, 99, 99, 99, 85, 99, 99,
+ /* 470 */ 99, 85, 139, 99, 85, 84, 147, 147, 149, 149,
+ /* 480 */ 84, 99, 147, 268, 149, 99, 201, 5, 99, 7,
+ /* 490 */ 5, 39, 7, 117, 147, 147, 149, 149, 147, 268,
+ /* 500 */ 149, 80, 81, 268, 268, 268, 251, 116, 201, 241,
+ /* 510 */ 201, 268, 268, 268, 268, 268, 241, 241, 241, 241,
+ /* 520 */ 241, 267, 201, 201, 201, 201, 277, 201, 201, 249,
+ /* 530 */ 249, 62, 201, 277, 272, 253, 249, 201, 201, 201,
+ /* 540 */ 201, 201, 201, 201, 201, 201, 201, 201, 126, 263,
+ /* 550 */ 201, 201, 264, 201, 201, 201, 201, 201, 201, 140,
+ /* 560 */ 201, 201, 201, 201, 272, 201, 138, 272, 272, 201,
+ /* 570 */ 133, 137, 262, 201, 201, 136, 201, 261, 201, 201,
+ /* 580 */ 201, 201, 201, 130, 201, 201, 201, 131, 201, 201,
+ /* 590 */ 129, 201, 260, 201, 201, 201, 201, 132, 201, 201,
+ /* 600 */ 201, 201, 201, 201, 201, 201, 201, 201, 201, 201,
+ /* 610 */ 201, 201, 201, 201, 142, 91, 203, 203, 203, 203,
+ /* 620 */ 203, 115, 53, 98, 97, 94, 96, 203, 57, 203,
+ /* 630 */ 203, 95, 93, 86, 5, 155, 5, 5, 203, 155,
+ /* 640 */ 5, 203, 209, 213, 213, 209, 5, 102, 144, 101,
+ /* 650 */ 203, 122, 117, 203, 84, 123, 99, 85, 204, 216,
+ /* 660 */ 203, 220, 222, 221, 219, 217, 215, 218, 204, 203,
+ /* 670 */ 224, 204, 240, 210, 204, 84, 203, 259, 258, 203,
+ /* 680 */ 257, 256, 205, 254, 240, 99, 85, 84, 99, 85,
+ /* 690 */ 84, 1, 85, 84, 99, 85, 84, 84, 134, 99,
+ /* 700 */ 134, 84, 84, 84, 117, 80, 118, 5, 72, 89,
+ /* 710 */ 88, 5, 89, 9, 88, 5, 5, 5, 5, 5,
+ /* 720 */ 5, 87, 15, 80, 84, 26, 61, 85, 84, 149,
+ /* 730 */ 119, 149, 16, 16, 149, 5, 99, 5, 85, 149,
/* 740 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
- /* 750 */ 5, 5, 5, 5, 5, 99, 87, 62, 0, 278,
- /* 760 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 770 */ 278, 21, 21, 278, 278, 278, 278, 278, 278, 278,
- /* 780 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 790 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 800 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 810 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 820 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 830 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 840 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 850 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 860 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 870 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 880 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 890 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 900 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 910 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 920 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 930 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 940 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 950 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
- /* 960 */ 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ /* 750 */ 5, 5, 5, 5, 5, 99, 87, 62, 0, 280,
+ /* 760 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 770 */ 280, 21, 21, 280, 280, 280, 280, 280, 280, 280,
+ /* 780 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 790 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 800 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 810 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 820 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 830 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 840 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 850 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 860 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 870 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 880 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 890 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 900 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 910 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 920 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 930 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 940 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 950 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 960 */ 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ /* 970 */ 280,
};
#define YY_SHIFT_COUNT (367)
#define YY_SHIFT_MIN (0)
#define YY_SHIFT_MAX (758)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 186, 113, 113, 260, 260, 98, 250, 266, 266, 193,
+ /* 0 */ 187, 113, 113, 261, 261, 25, 251, 267, 267, 150,
/* 10 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
- /* 20 */ 23, 23, 23, 0, 122, 266, 316, 316, 316, 9,
+ /* 20 */ 23, 23, 23, 0, 122, 267, 317, 317, 317, 9,
/* 30 */ 9, 23, 23, 18, 23, 78, 23, 23, 23, 23,
- /* 40 */ 308, 98, 112, 112, 61, 773, 773, 773, 266, 266,
- /* 50 */ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266,
- /* 60 */ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266,
- /* 70 */ 316, 316, 316, 2, 2, 2, 2, 2, 2, 2,
+ /* 40 */ 325, 25, 110, 110, 48, 773, 773, 773, 267, 267,
+ /* 50 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267,
+ /* 60 */ 267, 267, 267, 267, 267, 267, 267, 267, 267, 267,
+ /* 70 */ 317, 317, 317, 2, 2, 2, 2, 2, 2, 2,
/* 80 */ 23, 23, 23, 64, 23, 23, 23, 9, 9, 23,
- /* 90 */ 23, 23, 23, 286, 286, 313, 9, 23, 23, 23,
+ /* 90 */ 23, 23, 23, 328, 328, 314, 9, 23, 23, 23,
/* 100 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
/* 110 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
/* 120 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
/* 130 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
/* 140 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
- /* 150 */ 23, 23, 23, 23, 23, 23, 465, 465, 465, 425,
- /* 160 */ 425, 425, 425, 465, 465, 427, 426, 428, 430, 424,
- /* 170 */ 443, 446, 448, 451, 452, 465, 465, 465, 450, 450,
- /* 180 */ 505, 98, 98, 465, 465, 523, 528, 573, 533, 532,
- /* 190 */ 572, 535, 538, 505, 61, 465, 465, 546, 546, 465,
- /* 200 */ 546, 465, 546, 465, 465, 773, 773, 29, 69, 69,
- /* 210 */ 99, 69, 127, 170, 240, 252, 252, 252, 252, 252,
- /* 220 */ 252, 271, 283, 40, 40, 40, 40, 233, 256, 156,
- /* 230 */ 315, 257, 257, 317, 375, 361, 344, 340, 346, 347,
- /* 240 */ 352, 355, 356, 218, 335, 357, 359, 360, 362, 364,
- /* 250 */ 366, 367, 368, 227, 395, 378, 377, 323, 324, 328,
- /* 260 */ 473, 477, 338, 339, 376, 342, 384, 628, 482, 633,
- /* 270 */ 634, 485, 638, 639, 543, 549, 507, 530, 536, 570,
- /* 280 */ 534, 571, 574, 556, 560, 596, 598, 599, 604, 605,
- /* 290 */ 590, 607, 608, 610, 691, 612, 600, 563, 601, 564,
- /* 300 */ 617, 536, 618, 586, 620, 587, 626, 619, 621, 635,
- /* 310 */ 705, 622, 624, 704, 709, 710, 711, 712, 713, 714,
- /* 320 */ 715, 636, 706, 642, 640, 641, 606, 643, 702, 668,
- /* 330 */ 716, 581, 582, 637, 637, 637, 637, 717, 585, 588,
- /* 340 */ 637, 637, 637, 730, 733, 654, 637, 735, 736, 737,
+ /* 150 */ 23, 23, 23, 23, 23, 23, 469, 469, 469, 422,
+ /* 160 */ 422, 422, 422, 469, 469, 428, 419, 437, 434, 439,
+ /* 170 */ 456, 453, 461, 465, 472, 469, 469, 469, 524, 524,
+ /* 180 */ 506, 25, 25, 469, 469, 525, 527, 569, 531, 530,
+ /* 190 */ 571, 536, 539, 506, 48, 469, 469, 547, 547, 469,
+ /* 200 */ 547, 469, 547, 469, 469, 773, 773, 29, 69, 69,
+ /* 210 */ 99, 69, 127, 170, 241, 253, 253, 253, 253, 253,
+ /* 220 */ 253, 272, 284, 40, 40, 40, 40, 234, 248, 50,
+ /* 230 */ 167, 191, 191, 153, 294, 242, 263, 358, 238, 240,
+ /* 240 */ 363, 365, 366, 320, 333, 367, 369, 370, 371, 374,
+ /* 250 */ 376, 382, 386, 452, 396, 269, 389, 329, 330, 335,
+ /* 260 */ 482, 485, 347, 348, 391, 351, 421, 629, 480, 631,
+ /* 270 */ 632, 484, 635, 641, 545, 548, 504, 529, 535, 570,
+ /* 280 */ 532, 572, 591, 557, 586, 601, 603, 604, 606, 607,
+ /* 290 */ 589, 609, 610, 612, 690, 613, 595, 564, 600, 566,
+ /* 300 */ 617, 535, 618, 587, 619, 588, 625, 620, 622, 636,
+ /* 310 */ 702, 623, 626, 704, 706, 710, 711, 712, 713, 714,
+ /* 320 */ 715, 634, 707, 643, 640, 642, 611, 644, 699, 665,
+ /* 330 */ 716, 580, 582, 637, 637, 637, 637, 717, 585, 590,
+ /* 340 */ 637, 637, 637, 730, 732, 653, 637, 735, 736, 737,
/* 350 */ 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
/* 360 */ 748, 749, 656, 669, 750, 751, 695, 758,
};
#define YY_REDUCE_COUNT (206)
-#define YY_REDUCE_MIN (-266)
-#define YY_REDUCE_MAX (479)
+#define YY_REDUCE_MIN (-268)
+#define YY_REDUCE_MAX (477)
static const short yy_reduce_ofst[] = {
- /* 0 */ -186, 10, 10, -163, -163, 154, -159, -5, 42, -168,
- /* 10 */ -112, -115, 117, -41, 30, 51, 84, 95, 167, 169,
- /* 20 */ 176, 178, 179, -195, -197, -258, -239, -201, -143, -229,
- /* 30 */ -130, -62, 59, -238, -57, 47, 188, 191, 206, -88,
- /* 40 */ 48, 207, 115, 160, 119, -99, 22, 215, -266, -261,
- /* 50 */ -213, -58, 180, 224, 228, 229, 230, 232, 234, 236,
- /* 60 */ 237, 238, 239, 241, 242, 243, 244, 245, 246, 247,
- /* 70 */ 27, 35, 265, 254, 267, 276, 277, 278, 279, 280,
- /* 80 */ 282, 300, 321, 258, 322, 325, 326, 275, 281, 327,
- /* 90 */ 330, 331, 332, 259, 261, 284, 285, 334, 341, 343,
- /* 100 */ 345, 348, 349, 350, 351, 353, 354, 358, 363, 365,
- /* 110 */ 369, 370, 371, 372, 373, 374, 379, 380, 381, 382,
- /* 120 */ 383, 385, 386, 387, 388, 389, 390, 391, 392, 393,
- /* 130 */ 394, 396, 397, 398, 399, 400, 401, 402, 403, 404,
- /* 140 */ 405, 406, 407, 408, 409, 410, 411, 412, 413, 414,
- /* 150 */ 415, 416, 417, 418, 419, 420, 336, 337, 421, 269,
- /* 160 */ 273, 288, 289, 422, 423, 292, 294, 296, 287, 305,
- /* 170 */ 318, 429, 290, 432, 431, 433, 434, 435, 436, 437,
- /* 180 */ 438, 439, 442, 440, 441, 444, 447, 445, 453, 454,
- /* 190 */ 455, 449, 456, 457, 458, 459, 460, 466, 470, 461,
- /* 200 */ 471, 474, 472, 476, 478, 479, 475,
+ /* 0 */ -188, 10, 10, -165, -165, 53, -161, -1, 65, -170,
+ /* 10 */ -114, -17, 116, 119, 133, 172, 175, 177, 180, 188,
+ /* 20 */ 194, 196, 199, -197, -199, -260, -241, -203, -145, -231,
+ /* 30 */ -132, -64, 90, -240, -59, -44, 190, 200, 202, 27,
+ /* 40 */ 197, 198, 201, 217, 117, 181, 218, 204, -268, -263,
+ /* 50 */ -202, -184, -107, -60, -39, -14, 7, 32, 74, 148,
+ /* 60 */ 215, 231, 235, 236, 237, 243, 244, 245, 246, 247,
+ /* 70 */ 70, 184, 255, 126, 268, 275, 276, 277, 278, 279,
+ /* 80 */ 285, 307, 309, 254, 321, 322, 323, 280, 281, 324,
+ /* 90 */ 326, 327, 331, 249, 256, 282, 287, 336, 337, 338,
+ /* 100 */ 339, 340, 341, 342, 343, 344, 345, 346, 349, 350,
+ /* 110 */ 352, 353, 354, 355, 356, 357, 359, 360, 361, 362,
+ /* 120 */ 364, 368, 372, 373, 375, 377, 378, 379, 380, 381,
+ /* 130 */ 383, 384, 385, 387, 388, 390, 392, 393, 394, 395,
+ /* 140 */ 397, 398, 399, 400, 401, 402, 403, 404, 405, 406,
+ /* 150 */ 407, 408, 409, 410, 411, 412, 413, 414, 415, 262,
+ /* 160 */ 292, 295, 296, 416, 417, 288, 286, 310, 316, 332,
+ /* 170 */ 418, 420, 423, 425, 429, 424, 426, 427, 430, 431,
+ /* 180 */ 432, 433, 436, 435, 438, 440, 442, 441, 443, 445,
+ /* 190 */ 448, 449, 451, 444, 446, 447, 450, 454, 464, 457,
+ /* 200 */ 467, 466, 470, 473, 476, 463, 477,
};
static const YYACTIONTYPE yy_default[] = {
/* 0 */ 870, 994, 933, 1004, 920, 930, 1145, 1145, 1145, 870,
@@ -718,6 +708,7 @@ static const YYCODETYPE yyFallback[] = {
1, /* INSERT => ID */
1, /* INTO => ID */
1, /* VALUES => ID */
+ 1, /* FILE => ID */
};
#endif /* YYFALLBACK */
@@ -757,7 +748,6 @@ struct yyParser {
int yyerrcnt; /* Shifts left before out of the error */
#endif
ParseARG_SDECL /* A place to hold %extra_argument */
- ParseCTX_SDECL /* A place to hold %extra_context */
#if YYSTACKDEPTH<=0
int yystksz; /* Current side of the stack */
yyStackEntry *yystack; /* The parser's stack */
@@ -1002,87 +992,89 @@ static const char *const yyTokenName[] = {
/* 194 */ "INSERT",
/* 195 */ "INTO",
/* 196 */ "VALUES",
- /* 197 */ "program",
- /* 198 */ "cmd",
- /* 199 */ "ids",
- /* 200 */ "dbPrefix",
- /* 201 */ "cpxName",
- /* 202 */ "ifexists",
- /* 203 */ "alter_db_optr",
- /* 204 */ "alter_topic_optr",
- /* 205 */ "acct_optr",
- /* 206 */ "exprlist",
- /* 207 */ "ifnotexists",
- /* 208 */ "db_optr",
- /* 209 */ "topic_optr",
- /* 210 */ "typename",
- /* 211 */ "bufsize",
- /* 212 */ "pps",
- /* 213 */ "tseries",
- /* 214 */ "dbs",
- /* 215 */ "streams",
- /* 216 */ "storage",
- /* 217 */ "qtime",
- /* 218 */ "users",
- /* 219 */ "conns",
- /* 220 */ "state",
- /* 221 */ "intitemlist",
- /* 222 */ "intitem",
- /* 223 */ "keep",
- /* 224 */ "cache",
- /* 225 */ "replica",
- /* 226 */ "quorum",
- /* 227 */ "days",
- /* 228 */ "minrows",
- /* 229 */ "maxrows",
- /* 230 */ "blocks",
- /* 231 */ "ctime",
- /* 232 */ "wal",
- /* 233 */ "fsync",
- /* 234 */ "comp",
- /* 235 */ "prec",
- /* 236 */ "update",
- /* 237 */ "cachelast",
- /* 238 */ "partitions",
- /* 239 */ "signed",
- /* 240 */ "create_table_args",
- /* 241 */ "create_stable_args",
- /* 242 */ "create_table_list",
- /* 243 */ "create_from_stable",
- /* 244 */ "columnlist",
- /* 245 */ "tagitemlist",
- /* 246 */ "tagNamelist",
- /* 247 */ "select",
- /* 248 */ "column",
- /* 249 */ "tagitem",
- /* 250 */ "selcollist",
- /* 251 */ "from",
- /* 252 */ "where_opt",
- /* 253 */ "interval_option",
- /* 254 */ "sliding_opt",
- /* 255 */ "session_option",
- /* 256 */ "windowstate_option",
- /* 257 */ "fill_opt",
- /* 258 */ "groupby_opt",
- /* 259 */ "having_opt",
- /* 260 */ "orderby_opt",
- /* 261 */ "slimit_opt",
- /* 262 */ "limit_opt",
- /* 263 */ "union",
- /* 264 */ "sclp",
- /* 265 */ "distinct",
- /* 266 */ "expr",
- /* 267 */ "as",
- /* 268 */ "tablelist",
- /* 269 */ "sub",
- /* 270 */ "tmvar",
- /* 271 */ "intervalKey",
- /* 272 */ "sortlist",
- /* 273 */ "sortitem",
- /* 274 */ "item",
- /* 275 */ "sortorder",
- /* 276 */ "grouplist",
- /* 277 */ "expritem",
+ /* 197 */ "FILE",
+ /* 198 */ "error",
+ /* 199 */ "program",
+ /* 200 */ "cmd",
+ /* 201 */ "ids",
+ /* 202 */ "dbPrefix",
+ /* 203 */ "cpxName",
+ /* 204 */ "ifexists",
+ /* 205 */ "alter_db_optr",
+ /* 206 */ "alter_topic_optr",
+ /* 207 */ "acct_optr",
+ /* 208 */ "exprlist",
+ /* 209 */ "ifnotexists",
+ /* 210 */ "db_optr",
+ /* 211 */ "topic_optr",
+ /* 212 */ "typename",
+ /* 213 */ "bufsize",
+ /* 214 */ "pps",
+ /* 215 */ "tseries",
+ /* 216 */ "dbs",
+ /* 217 */ "streams",
+ /* 218 */ "storage",
+ /* 219 */ "qtime",
+ /* 220 */ "users",
+ /* 221 */ "conns",
+ /* 222 */ "state",
+ /* 223 */ "intitemlist",
+ /* 224 */ "intitem",
+ /* 225 */ "keep",
+ /* 226 */ "cache",
+ /* 227 */ "replica",
+ /* 228 */ "quorum",
+ /* 229 */ "days",
+ /* 230 */ "minrows",
+ /* 231 */ "maxrows",
+ /* 232 */ "blocks",
+ /* 233 */ "ctime",
+ /* 234 */ "wal",
+ /* 235 */ "fsync",
+ /* 236 */ "comp",
+ /* 237 */ "prec",
+ /* 238 */ "update",
+ /* 239 */ "cachelast",
+ /* 240 */ "partitions",
+ /* 241 */ "signed",
+ /* 242 */ "create_table_args",
+ /* 243 */ "create_stable_args",
+ /* 244 */ "create_table_list",
+ /* 245 */ "create_from_stable",
+ /* 246 */ "columnlist",
+ /* 247 */ "tagitemlist",
+ /* 248 */ "tagNamelist",
+ /* 249 */ "select",
+ /* 250 */ "column",
+ /* 251 */ "tagitem",
+ /* 252 */ "selcollist",
+ /* 253 */ "from",
+ /* 254 */ "where_opt",
+ /* 255 */ "interval_option",
+ /* 256 */ "sliding_opt",
+ /* 257 */ "session_option",
+ /* 258 */ "windowstate_option",
+ /* 259 */ "fill_opt",
+ /* 260 */ "groupby_opt",
+ /* 261 */ "having_opt",
+ /* 262 */ "orderby_opt",
+ /* 263 */ "slimit_opt",
+ /* 264 */ "limit_opt",
+ /* 265 */ "union",
+ /* 266 */ "sclp",
+ /* 267 */ "distinct",
+ /* 268 */ "expr",
+ /* 269 */ "as",
+ /* 270 */ "tablelist",
+ /* 271 */ "sub",
+ /* 272 */ "tmvar",
+ /* 273 */ "intervalKey",
+ /* 274 */ "sortlist",
+ /* 275 */ "sortitem",
+ /* 276 */ "item",
+ /* 277 */ "sortorder",
+ /* 278 */ "grouplist",
+ /* 279 */ "expritem",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -1432,29 +1424,28 @@ static int yyGrowStack(yyParser *p){
/* Initialize a new parser that has already been allocated.
*/
-void ParseInit(void *yypRawParser ParseCTX_PDECL){
- yyParser *yypParser = (yyParser*)yypRawParser;
- ParseCTX_STORE
+void ParseInit(void *yypParser){
+ yyParser *pParser = (yyParser*)yypParser;
#ifdef YYTRACKMAXSTACKDEPTH
- yypParser->yyhwm = 0;
+ pParser->yyhwm = 0;
#endif
#if YYSTACKDEPTH<=0
- yypParser->yytos = NULL;
- yypParser->yystack = NULL;
- yypParser->yystksz = 0;
- if( yyGrowStack(yypParser) ){
- yypParser->yystack = &yypParser->yystk0;
- yypParser->yystksz = 1;
+ pParser->yytos = NULL;
+ pParser->yystack = NULL;
+ pParser->yystksz = 0;
+ if( yyGrowStack(pParser) ){
+ pParser->yystack = &pParser->yystk0;
+ pParser->yystksz = 1;
}
#endif
#ifndef YYNOERRORRECOVERY
- yypParser->yyerrcnt = -1;
+ pParser->yyerrcnt = -1;
#endif
- yypParser->yytos = yypParser->yystack;
- yypParser->yystack[0].stateno = 0;
- yypParser->yystack[0].major = 0;
+ pParser->yytos = pParser->yystack;
+ pParser->yystack[0].stateno = 0;
+ pParser->yystack[0].major = 0;
#if YYSTACKDEPTH>0
- yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1];
+ pParser->yystackEnd = &pParser->yystack[YYSTACKDEPTH-1];
#endif
}
@@ -1471,14 +1462,11 @@ void ParseInit(void *yypRawParser ParseCTX_PDECL){
** A pointer to a parser. This pointer is used in subsequent calls
** to Parse and ParseFree.
*/
-void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){
- yyParser *yypParser;
- yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
- if( yypParser ){
- ParseCTX_STORE
- ParseInit(yypParser ParseCTX_PARAM);
- }
- return (void*)yypParser;
+void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){
+ yyParser *pParser;
+ pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
+ if( pParser ) ParseInit(pParser);
+ return pParser;
}
#endif /* Parse_ENGINEALWAYSONSTACK */
@@ -1495,8 +1483,7 @@ static void yy_destructor(
YYCODETYPE yymajor, /* Type code for object to destroy */
YYMINORTYPE *yypminor /* The object to be destroyed */
){
- ParseARG_FETCH
- ParseCTX_FETCH
+ ParseARG_FETCH;
switch( yymajor ){
/* Here is inserted the actions which take place when a
** terminal or non-terminal is destroyed. This can happen
@@ -1509,60 +1496,60 @@ static void yy_destructor(
** inside the C code.
*/
/********* Begin destructor definitions ***************************************/
- case 206: /* exprlist */
- case 250: /* selcollist */
- case 264: /* sclp */
+ case 208: /* exprlist */
+ case 252: /* selcollist */
+ case 266: /* sclp */
{
-tSqlExprListDestroy((yypminor->yy421));
+tSqlExprListDestroy((yypminor->yy221));
}
break;
- case 221: /* intitemlist */
- case 223: /* keep */
- case 244: /* columnlist */
- case 245: /* tagitemlist */
- case 246: /* tagNamelist */
- case 257: /* fill_opt */
- case 258: /* groupby_opt */
- case 260: /* orderby_opt */
- case 272: /* sortlist */
- case 276: /* grouplist */
-{
-taosArrayDestroy((yypminor->yy421));
+ case 223: /* intitemlist */
+ case 225: /* keep */
+ case 246: /* columnlist */
+ case 247: /* tagitemlist */
+ case 248: /* tagNamelist */
+ case 259: /* fill_opt */
+ case 260: /* groupby_opt */
+ case 262: /* orderby_opt */
+ case 274: /* sortlist */
+ case 278: /* grouplist */
+{
+taosArrayDestroy((yypminor->yy221));
}
break;
- case 242: /* create_table_list */
+ case 244: /* create_table_list */
{
-destroyCreateTableSql((yypminor->yy438));
+destroyCreateTableSql((yypminor->yy102));
}
break;
- case 247: /* select */
+ case 249: /* select */
{
-destroySqlNode((yypminor->yy56));
+destroySqlNode((yypminor->yy376));
}
break;
- case 251: /* from */
- case 268: /* tablelist */
- case 269: /* sub */
+ case 253: /* from */
+ case 270: /* tablelist */
+ case 271: /* sub */
{
-destroyRelationInfo((yypminor->yy8));
+destroyRelationInfo((yypminor->yy164));
}
break;
- case 252: /* where_opt */
- case 259: /* having_opt */
- case 266: /* expr */
- case 277: /* expritem */
+ case 254: /* where_opt */
+ case 261: /* having_opt */
+ case 268: /* expr */
+ case 279: /* expritem */
{
-tSqlExprDestroy((yypminor->yy439));
+tSqlExprDestroy((yypminor->yy146));
}
break;
- case 263: /* union */
+ case 265: /* union */
{
-destroyAllSqlNode((yypminor->yy421));
+destroyAllSqlNode((yypminor->yy221));
}
break;
- case 273: /* sortitem */
+ case 275: /* sortitem */
{
-tVariantDestroy(&(yypminor->yy430));
+tVariantDestroy(&(yypminor->yy106));
}
break;
/********* End destructor definitions *****************************************/
@@ -1674,12 +1661,13 @@ int ParseCoverage(FILE *out){
** Find the appropriate action for a parser given the terminal
** look-ahead token iLookAhead.
*/
-static YYACTIONTYPE yy_find_shift_action(
- YYCODETYPE iLookAhead, /* The look-ahead token */
- YYACTIONTYPE stateno /* Current state number */
+static unsigned int yy_find_shift_action(
+ yyParser *pParser, /* The parser */
+ YYCODETYPE iLookAhead /* The look-ahead token */
){
int i;
-
+ int stateno = pParser->yytos->stateno;
+
if( stateno>YY_MAX_SHIFT ) return stateno;
assert( stateno <= YY_SHIFT_COUNT );
#if defined(YYCOVERAGE)
@@ -1687,19 +1675,15 @@ static YYACTIONTYPE yy_find_shift_action(
#endif
do{
i = yy_shift_ofst[stateno];
- assert( i>=0 );
- assert( i<=YY_ACTTAB_COUNT );
- assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
+ assert( i>=0 && i+YYNTOKEN<=sizeof(yy_lookahead)/sizeof(yy_lookahead[0]) );
assert( iLookAhead!=YYNOCODE );
assert( iLookAhead < YYNTOKEN );
i += iLookAhead;
- assert( i<(int)YY_NLOOKAHEAD );
if( yy_lookahead[i]!=iLookAhead ){
#ifdef YYFALLBACK
YYCODETYPE iFallback; /* Fallback token */
- assert( iLookAhead %s\n",
@@ -1714,8 +1698,15 @@ static YYACTIONTYPE yy_find_shift_action(
#ifdef YYWILDCARD
{
int j = i - iLookAhead + YYWILDCARD;
- assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
- if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
+ if(
+#if YY_SHIFT_MIN+YYWILDCARD<0
+ j>=0 &&
+#endif
+#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
+ j0
+ ){
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
@@ -1729,7 +1720,6 @@ static YYACTIONTYPE yy_find_shift_action(
#endif /* YYWILDCARD */
return yy_default[stateno];
}else{
- assert( i>=0 && iyytos;
- yytos->stateno = yyNewState;
- yytos->major = yyMajor;
+ yytos->stateno = (YYACTIONTYPE)yyNewState;
+ yytos->major = (YYCODETYPE)yyMajor;
yytos->minor.yy0 = yyMinor;
yyTraceShift(yypParser, yyNewState, "Shift");
}
-/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
-** of that rule */
-static const YYCODETYPE yyRuleInfoLhs[] = {
- 197, /* (0) program ::= cmd */
- 198, /* (1) cmd ::= SHOW DATABASES */
- 198, /* (2) cmd ::= SHOW TOPICS */
- 198, /* (3) cmd ::= SHOW FUNCTIONS */
- 198, /* (4) cmd ::= SHOW MNODES */
- 198, /* (5) cmd ::= SHOW DNODES */
- 198, /* (6) cmd ::= SHOW ACCOUNTS */
- 198, /* (7) cmd ::= SHOW USERS */
- 198, /* (8) cmd ::= SHOW MODULES */
- 198, /* (9) cmd ::= SHOW QUERIES */
- 198, /* (10) cmd ::= SHOW CONNECTIONS */
- 198, /* (11) cmd ::= SHOW STREAMS */
- 198, /* (12) cmd ::= SHOW VARIABLES */
- 198, /* (13) cmd ::= SHOW SCORES */
- 198, /* (14) cmd ::= SHOW GRANTS */
- 198, /* (15) cmd ::= SHOW VNODES */
- 198, /* (16) cmd ::= SHOW VNODES ids */
- 200, /* (17) dbPrefix ::= */
- 200, /* (18) dbPrefix ::= ids DOT */
- 201, /* (19) cpxName ::= */
- 201, /* (20) cpxName ::= DOT ids */
- 198, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */
- 198, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */
- 198, /* (23) cmd ::= SHOW CREATE DATABASE ids */
- 198, /* (24) cmd ::= SHOW dbPrefix TABLES */
- 198, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */
- 198, /* (26) cmd ::= SHOW dbPrefix STABLES */
- 198, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */
- 198, /* (28) cmd ::= SHOW dbPrefix VGROUPS */
- 198, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */
- 198, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */
- 198, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */
- 198, /* (32) cmd ::= DROP DATABASE ifexists ids */
- 198, /* (33) cmd ::= DROP TOPIC ifexists ids */
- 198, /* (34) cmd ::= DROP FUNCTION ids */
- 198, /* (35) cmd ::= DROP DNODE ids */
- 198, /* (36) cmd ::= DROP USER ids */
- 198, /* (37) cmd ::= DROP ACCOUNT ids */
- 198, /* (38) cmd ::= USE ids */
- 198, /* (39) cmd ::= DESCRIBE ids cpxName */
- 198, /* (40) cmd ::= DESC ids cpxName */
- 198, /* (41) cmd ::= ALTER USER ids PASS ids */
- 198, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */
- 198, /* (43) cmd ::= ALTER DNODE ids ids */
- 198, /* (44) cmd ::= ALTER DNODE ids ids ids */
- 198, /* (45) cmd ::= ALTER LOCAL ids */
- 198, /* (46) cmd ::= ALTER LOCAL ids ids */
- 198, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */
- 198, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */
- 198, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */
- 198, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
- 198, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */
- 199, /* (52) ids ::= ID */
- 199, /* (53) ids ::= STRING */
- 202, /* (54) ifexists ::= IF EXISTS */
- 202, /* (55) ifexists ::= */
- 207, /* (56) ifnotexists ::= IF NOT EXISTS */
- 207, /* (57) ifnotexists ::= */
- 198, /* (58) cmd ::= CREATE DNODE ids */
- 198, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
- 198, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
- 198, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
- 198, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- 198, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- 198, /* (64) cmd ::= CREATE USER ids PASS ids */
- 211, /* (65) bufsize ::= */
- 211, /* (66) bufsize ::= BUFSIZE INTEGER */
- 212, /* (67) pps ::= */
- 212, /* (68) pps ::= PPS INTEGER */
- 213, /* (69) tseries ::= */
- 213, /* (70) tseries ::= TSERIES INTEGER */
- 214, /* (71) dbs ::= */
- 214, /* (72) dbs ::= DBS INTEGER */
- 215, /* (73) streams ::= */
- 215, /* (74) streams ::= STREAMS INTEGER */
- 216, /* (75) storage ::= */
- 216, /* (76) storage ::= STORAGE INTEGER */
- 217, /* (77) qtime ::= */
- 217, /* (78) qtime ::= QTIME INTEGER */
- 218, /* (79) users ::= */
- 218, /* (80) users ::= USERS INTEGER */
- 219, /* (81) conns ::= */
- 219, /* (82) conns ::= CONNS INTEGER */
- 220, /* (83) state ::= */
- 220, /* (84) state ::= STATE ids */
- 205, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
- 221, /* (86) intitemlist ::= intitemlist COMMA intitem */
- 221, /* (87) intitemlist ::= intitem */
- 222, /* (88) intitem ::= INTEGER */
- 223, /* (89) keep ::= KEEP intitemlist */
- 224, /* (90) cache ::= CACHE INTEGER */
- 225, /* (91) replica ::= REPLICA INTEGER */
- 226, /* (92) quorum ::= QUORUM INTEGER */
- 227, /* (93) days ::= DAYS INTEGER */
- 228, /* (94) minrows ::= MINROWS INTEGER */
- 229, /* (95) maxrows ::= MAXROWS INTEGER */
- 230, /* (96) blocks ::= BLOCKS INTEGER */
- 231, /* (97) ctime ::= CTIME INTEGER */
- 232, /* (98) wal ::= WAL INTEGER */
- 233, /* (99) fsync ::= FSYNC INTEGER */
- 234, /* (100) comp ::= COMP INTEGER */
- 235, /* (101) prec ::= PRECISION STRING */
- 236, /* (102) update ::= UPDATE INTEGER */
- 237, /* (103) cachelast ::= CACHELAST INTEGER */
- 238, /* (104) partitions ::= PARTITIONS INTEGER */
- 208, /* (105) db_optr ::= */
- 208, /* (106) db_optr ::= db_optr cache */
- 208, /* (107) db_optr ::= db_optr replica */
- 208, /* (108) db_optr ::= db_optr quorum */
- 208, /* (109) db_optr ::= db_optr days */
- 208, /* (110) db_optr ::= db_optr minrows */
- 208, /* (111) db_optr ::= db_optr maxrows */
- 208, /* (112) db_optr ::= db_optr blocks */
- 208, /* (113) db_optr ::= db_optr ctime */
- 208, /* (114) db_optr ::= db_optr wal */
- 208, /* (115) db_optr ::= db_optr fsync */
- 208, /* (116) db_optr ::= db_optr comp */
- 208, /* (117) db_optr ::= db_optr prec */
- 208, /* (118) db_optr ::= db_optr keep */
- 208, /* (119) db_optr ::= db_optr update */
- 208, /* (120) db_optr ::= db_optr cachelast */
- 209, /* (121) topic_optr ::= db_optr */
- 209, /* (122) topic_optr ::= topic_optr partitions */
- 203, /* (123) alter_db_optr ::= */
- 203, /* (124) alter_db_optr ::= alter_db_optr replica */
- 203, /* (125) alter_db_optr ::= alter_db_optr quorum */
- 203, /* (126) alter_db_optr ::= alter_db_optr keep */
- 203, /* (127) alter_db_optr ::= alter_db_optr blocks */
- 203, /* (128) alter_db_optr ::= alter_db_optr comp */
- 203, /* (129) alter_db_optr ::= alter_db_optr update */
- 203, /* (130) alter_db_optr ::= alter_db_optr cachelast */
- 204, /* (131) alter_topic_optr ::= alter_db_optr */
- 204, /* (132) alter_topic_optr ::= alter_topic_optr partitions */
- 210, /* (133) typename ::= ids */
- 210, /* (134) typename ::= ids LP signed RP */
- 210, /* (135) typename ::= ids UNSIGNED */
- 239, /* (136) signed ::= INTEGER */
- 239, /* (137) signed ::= PLUS INTEGER */
- 239, /* (138) signed ::= MINUS INTEGER */
- 198, /* (139) cmd ::= CREATE TABLE create_table_args */
- 198, /* (140) cmd ::= CREATE TABLE create_stable_args */
- 198, /* (141) cmd ::= CREATE STABLE create_stable_args */
- 198, /* (142) cmd ::= CREATE TABLE create_table_list */
- 242, /* (143) create_table_list ::= create_from_stable */
- 242, /* (144) create_table_list ::= create_table_list create_from_stable */
- 240, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
- 241, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
- 243, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
- 243, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
- 246, /* (149) tagNamelist ::= tagNamelist COMMA ids */
- 246, /* (150) tagNamelist ::= ids */
- 240, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */
- 244, /* (152) columnlist ::= columnlist COMMA column */
- 244, /* (153) columnlist ::= column */
- 248, /* (154) column ::= ids typename */
- 245, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */
- 245, /* (156) tagitemlist ::= tagitem */
- 249, /* (157) tagitem ::= INTEGER */
- 249, /* (158) tagitem ::= FLOAT */
- 249, /* (159) tagitem ::= STRING */
- 249, /* (160) tagitem ::= BOOL */
- 249, /* (161) tagitem ::= NULL */
- 249, /* (162) tagitem ::= NOW */
- 249, /* (163) tagitem ::= MINUS INTEGER */
- 249, /* (164) tagitem ::= MINUS FLOAT */
- 249, /* (165) tagitem ::= PLUS INTEGER */
- 249, /* (166) tagitem ::= PLUS FLOAT */
- 247, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
- 247, /* (168) select ::= LP select RP */
- 263, /* (169) union ::= select */
- 263, /* (170) union ::= union UNION ALL select */
- 198, /* (171) cmd ::= union */
- 247, /* (172) select ::= SELECT selcollist */
- 264, /* (173) sclp ::= selcollist COMMA */
- 264, /* (174) sclp ::= */
- 250, /* (175) selcollist ::= sclp distinct expr as */
- 250, /* (176) selcollist ::= sclp STAR */
- 267, /* (177) as ::= AS ids */
- 267, /* (178) as ::= ids */
- 267, /* (179) as ::= */
- 265, /* (180) distinct ::= DISTINCT */
- 265, /* (181) distinct ::= */
- 251, /* (182) from ::= FROM tablelist */
- 251, /* (183) from ::= FROM sub */
- 269, /* (184) sub ::= LP union RP */
- 269, /* (185) sub ::= LP union RP ids */
- 269, /* (186) sub ::= sub COMMA LP union RP ids */
- 268, /* (187) tablelist ::= ids cpxName */
- 268, /* (188) tablelist ::= ids cpxName ids */
- 268, /* (189) tablelist ::= tablelist COMMA ids cpxName */
- 268, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */
- 270, /* (191) tmvar ::= VARIABLE */
- 253, /* (192) interval_option ::= intervalKey LP tmvar RP */
- 253, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
- 253, /* (194) interval_option ::= */
- 271, /* (195) intervalKey ::= INTERVAL */
- 271, /* (196) intervalKey ::= EVERY */
- 255, /* (197) session_option ::= */
- 255, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
- 256, /* (199) windowstate_option ::= */
- 256, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */
- 257, /* (201) fill_opt ::= */
- 257, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
- 257, /* (203) fill_opt ::= FILL LP ID RP */
- 254, /* (204) sliding_opt ::= SLIDING LP tmvar RP */
- 254, /* (205) sliding_opt ::= */
- 260, /* (206) orderby_opt ::= */
- 260, /* (207) orderby_opt ::= ORDER BY sortlist */
- 272, /* (208) sortlist ::= sortlist COMMA item sortorder */
- 272, /* (209) sortlist ::= item sortorder */
- 274, /* (210) item ::= ids cpxName */
- 275, /* (211) sortorder ::= ASC */
- 275, /* (212) sortorder ::= DESC */
- 275, /* (213) sortorder ::= */
- 258, /* (214) groupby_opt ::= */
- 258, /* (215) groupby_opt ::= GROUP BY grouplist */
- 276, /* (216) grouplist ::= grouplist COMMA item */
- 276, /* (217) grouplist ::= item */
- 259, /* (218) having_opt ::= */
- 259, /* (219) having_opt ::= HAVING expr */
- 262, /* (220) limit_opt ::= */
- 262, /* (221) limit_opt ::= LIMIT signed */
- 262, /* (222) limit_opt ::= LIMIT signed OFFSET signed */
- 262, /* (223) limit_opt ::= LIMIT signed COMMA signed */
- 261, /* (224) slimit_opt ::= */
- 261, /* (225) slimit_opt ::= SLIMIT signed */
- 261, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */
- 261, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */
- 252, /* (228) where_opt ::= */
- 252, /* (229) where_opt ::= WHERE expr */
- 266, /* (230) expr ::= LP expr RP */
- 266, /* (231) expr ::= ID */
- 266, /* (232) expr ::= ID DOT ID */
- 266, /* (233) expr ::= ID DOT STAR */
- 266, /* (234) expr ::= INTEGER */
- 266, /* (235) expr ::= MINUS INTEGER */
- 266, /* (236) expr ::= PLUS INTEGER */
- 266, /* (237) expr ::= FLOAT */
- 266, /* (238) expr ::= MINUS FLOAT */
- 266, /* (239) expr ::= PLUS FLOAT */
- 266, /* (240) expr ::= STRING */
- 266, /* (241) expr ::= NOW */
- 266, /* (242) expr ::= VARIABLE */
- 266, /* (243) expr ::= PLUS VARIABLE */
- 266, /* (244) expr ::= MINUS VARIABLE */
- 266, /* (245) expr ::= BOOL */
- 266, /* (246) expr ::= NULL */
- 266, /* (247) expr ::= ID LP exprlist RP */
- 266, /* (248) expr ::= ID LP STAR RP */
- 266, /* (249) expr ::= expr IS NULL */
- 266, /* (250) expr ::= expr IS NOT NULL */
- 266, /* (251) expr ::= expr LT expr */
- 266, /* (252) expr ::= expr GT expr */
- 266, /* (253) expr ::= expr LE expr */
- 266, /* (254) expr ::= expr GE expr */
- 266, /* (255) expr ::= expr NE expr */
- 266, /* (256) expr ::= expr EQ expr */
- 266, /* (257) expr ::= expr BETWEEN expr AND expr */
- 266, /* (258) expr ::= expr AND expr */
- 266, /* (259) expr ::= expr OR expr */
- 266, /* (260) expr ::= expr PLUS expr */
- 266, /* (261) expr ::= expr MINUS expr */
- 266, /* (262) expr ::= expr STAR expr */
- 266, /* (263) expr ::= expr SLASH expr */
- 266, /* (264) expr ::= expr REM expr */
- 266, /* (265) expr ::= expr LIKE expr */
- 266, /* (266) expr ::= expr MATCH expr */
- 266, /* (267) expr ::= expr NMATCH expr */
- 266, /* (268) expr ::= expr IN LP exprlist RP */
- 206, /* (269) exprlist ::= exprlist COMMA expritem */
- 206, /* (270) exprlist ::= expritem */
- 277, /* (271) expritem ::= expr */
- 277, /* (272) expritem ::= */
- 198, /* (273) cmd ::= RESET QUERY CACHE */
- 198, /* (274) cmd ::= SYNCDB ids REPLICA */
- 198, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- 198, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- 198, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
- 198, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- 198, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- 198, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- 198, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- 198, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
- 198, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- 198, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- 198, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
- 198, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- 198, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- 198, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- 198, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
- 198, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
- 198, /* (291) cmd ::= KILL CONNECTION INTEGER */
- 198, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- 198, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */
-};
-
-/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
-** of symbols on the right-hand side of that rule. */
-static const signed char yyRuleInfoNRhs[] = {
- -1, /* (0) program ::= cmd */
- -2, /* (1) cmd ::= SHOW DATABASES */
- -2, /* (2) cmd ::= SHOW TOPICS */
- -2, /* (3) cmd ::= SHOW FUNCTIONS */
- -2, /* (4) cmd ::= SHOW MNODES */
- -2, /* (5) cmd ::= SHOW DNODES */
- -2, /* (6) cmd ::= SHOW ACCOUNTS */
- -2, /* (7) cmd ::= SHOW USERS */
- -2, /* (8) cmd ::= SHOW MODULES */
- -2, /* (9) cmd ::= SHOW QUERIES */
- -2, /* (10) cmd ::= SHOW CONNECTIONS */
- -2, /* (11) cmd ::= SHOW STREAMS */
- -2, /* (12) cmd ::= SHOW VARIABLES */
- -2, /* (13) cmd ::= SHOW SCORES */
- -2, /* (14) cmd ::= SHOW GRANTS */
- -2, /* (15) cmd ::= SHOW VNODES */
- -3, /* (16) cmd ::= SHOW VNODES ids */
- 0, /* (17) dbPrefix ::= */
- -2, /* (18) dbPrefix ::= ids DOT */
- 0, /* (19) cpxName ::= */
- -2, /* (20) cpxName ::= DOT ids */
- -5, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */
- -5, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */
- -4, /* (23) cmd ::= SHOW CREATE DATABASE ids */
- -3, /* (24) cmd ::= SHOW dbPrefix TABLES */
- -5, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */
- -3, /* (26) cmd ::= SHOW dbPrefix STABLES */
- -5, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */
- -3, /* (28) cmd ::= SHOW dbPrefix VGROUPS */
- -4, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */
- -5, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */
- -5, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */
- -4, /* (32) cmd ::= DROP DATABASE ifexists ids */
- -4, /* (33) cmd ::= DROP TOPIC ifexists ids */
- -3, /* (34) cmd ::= DROP FUNCTION ids */
- -3, /* (35) cmd ::= DROP DNODE ids */
- -3, /* (36) cmd ::= DROP USER ids */
- -3, /* (37) cmd ::= DROP ACCOUNT ids */
- -2, /* (38) cmd ::= USE ids */
- -3, /* (39) cmd ::= DESCRIBE ids cpxName */
- -3, /* (40) cmd ::= DESC ids cpxName */
- -5, /* (41) cmd ::= ALTER USER ids PASS ids */
- -5, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */
- -4, /* (43) cmd ::= ALTER DNODE ids ids */
- -5, /* (44) cmd ::= ALTER DNODE ids ids ids */
- -3, /* (45) cmd ::= ALTER LOCAL ids */
- -4, /* (46) cmd ::= ALTER LOCAL ids ids */
- -4, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */
- -4, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */
- -4, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */
- -6, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
- -6, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */
- -1, /* (52) ids ::= ID */
- -1, /* (53) ids ::= STRING */
- -2, /* (54) ifexists ::= IF EXISTS */
- 0, /* (55) ifexists ::= */
- -3, /* (56) ifnotexists ::= IF NOT EXISTS */
- 0, /* (57) ifnotexists ::= */
- -3, /* (58) cmd ::= CREATE DNODE ids */
- -6, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
- -5, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
- -5, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
- -8, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- -9, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
- -5, /* (64) cmd ::= CREATE USER ids PASS ids */
- 0, /* (65) bufsize ::= */
- -2, /* (66) bufsize ::= BUFSIZE INTEGER */
- 0, /* (67) pps ::= */
- -2, /* (68) pps ::= PPS INTEGER */
- 0, /* (69) tseries ::= */
- -2, /* (70) tseries ::= TSERIES INTEGER */
- 0, /* (71) dbs ::= */
- -2, /* (72) dbs ::= DBS INTEGER */
- 0, /* (73) streams ::= */
- -2, /* (74) streams ::= STREAMS INTEGER */
- 0, /* (75) storage ::= */
- -2, /* (76) storage ::= STORAGE INTEGER */
- 0, /* (77) qtime ::= */
- -2, /* (78) qtime ::= QTIME INTEGER */
- 0, /* (79) users ::= */
- -2, /* (80) users ::= USERS INTEGER */
- 0, /* (81) conns ::= */
- -2, /* (82) conns ::= CONNS INTEGER */
- 0, /* (83) state ::= */
- -2, /* (84) state ::= STATE ids */
- -9, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
- -3, /* (86) intitemlist ::= intitemlist COMMA intitem */
- -1, /* (87) intitemlist ::= intitem */
- -1, /* (88) intitem ::= INTEGER */
- -2, /* (89) keep ::= KEEP intitemlist */
- -2, /* (90) cache ::= CACHE INTEGER */
- -2, /* (91) replica ::= REPLICA INTEGER */
- -2, /* (92) quorum ::= QUORUM INTEGER */
- -2, /* (93) days ::= DAYS INTEGER */
- -2, /* (94) minrows ::= MINROWS INTEGER */
- -2, /* (95) maxrows ::= MAXROWS INTEGER */
- -2, /* (96) blocks ::= BLOCKS INTEGER */
- -2, /* (97) ctime ::= CTIME INTEGER */
- -2, /* (98) wal ::= WAL INTEGER */
- -2, /* (99) fsync ::= FSYNC INTEGER */
- -2, /* (100) comp ::= COMP INTEGER */
- -2, /* (101) prec ::= PRECISION STRING */
- -2, /* (102) update ::= UPDATE INTEGER */
- -2, /* (103) cachelast ::= CACHELAST INTEGER */
- -2, /* (104) partitions ::= PARTITIONS INTEGER */
- 0, /* (105) db_optr ::= */
- -2, /* (106) db_optr ::= db_optr cache */
- -2, /* (107) db_optr ::= db_optr replica */
- -2, /* (108) db_optr ::= db_optr quorum */
- -2, /* (109) db_optr ::= db_optr days */
- -2, /* (110) db_optr ::= db_optr minrows */
- -2, /* (111) db_optr ::= db_optr maxrows */
- -2, /* (112) db_optr ::= db_optr blocks */
- -2, /* (113) db_optr ::= db_optr ctime */
- -2, /* (114) db_optr ::= db_optr wal */
- -2, /* (115) db_optr ::= db_optr fsync */
- -2, /* (116) db_optr ::= db_optr comp */
- -2, /* (117) db_optr ::= db_optr prec */
- -2, /* (118) db_optr ::= db_optr keep */
- -2, /* (119) db_optr ::= db_optr update */
- -2, /* (120) db_optr ::= db_optr cachelast */
- -1, /* (121) topic_optr ::= db_optr */
- -2, /* (122) topic_optr ::= topic_optr partitions */
- 0, /* (123) alter_db_optr ::= */
- -2, /* (124) alter_db_optr ::= alter_db_optr replica */
- -2, /* (125) alter_db_optr ::= alter_db_optr quorum */
- -2, /* (126) alter_db_optr ::= alter_db_optr keep */
- -2, /* (127) alter_db_optr ::= alter_db_optr blocks */
- -2, /* (128) alter_db_optr ::= alter_db_optr comp */
- -2, /* (129) alter_db_optr ::= alter_db_optr update */
- -2, /* (130) alter_db_optr ::= alter_db_optr cachelast */
- -1, /* (131) alter_topic_optr ::= alter_db_optr */
- -2, /* (132) alter_topic_optr ::= alter_topic_optr partitions */
- -1, /* (133) typename ::= ids */
- -4, /* (134) typename ::= ids LP signed RP */
- -2, /* (135) typename ::= ids UNSIGNED */
- -1, /* (136) signed ::= INTEGER */
- -2, /* (137) signed ::= PLUS INTEGER */
- -2, /* (138) signed ::= MINUS INTEGER */
- -3, /* (139) cmd ::= CREATE TABLE create_table_args */
- -3, /* (140) cmd ::= CREATE TABLE create_stable_args */
- -3, /* (141) cmd ::= CREATE STABLE create_stable_args */
- -3, /* (142) cmd ::= CREATE TABLE create_table_list */
- -1, /* (143) create_table_list ::= create_from_stable */
- -2, /* (144) create_table_list ::= create_table_list create_from_stable */
- -6, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
- -10, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
- -10, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
- -13, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
- -3, /* (149) tagNamelist ::= tagNamelist COMMA ids */
- -1, /* (150) tagNamelist ::= ids */
- -5, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */
- -3, /* (152) columnlist ::= columnlist COMMA column */
- -1, /* (153) columnlist ::= column */
- -2, /* (154) column ::= ids typename */
- -3, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */
- -1, /* (156) tagitemlist ::= tagitem */
- -1, /* (157) tagitem ::= INTEGER */
- -1, /* (158) tagitem ::= FLOAT */
- -1, /* (159) tagitem ::= STRING */
- -1, /* (160) tagitem ::= BOOL */
- -1, /* (161) tagitem ::= NULL */
- -1, /* (162) tagitem ::= NOW */
- -2, /* (163) tagitem ::= MINUS INTEGER */
- -2, /* (164) tagitem ::= MINUS FLOAT */
- -2, /* (165) tagitem ::= PLUS INTEGER */
- -2, /* (166) tagitem ::= PLUS FLOAT */
- -14, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
- -3, /* (168) select ::= LP select RP */
- -1, /* (169) union ::= select */
- -4, /* (170) union ::= union UNION ALL select */
- -1, /* (171) cmd ::= union */
- -2, /* (172) select ::= SELECT selcollist */
- -2, /* (173) sclp ::= selcollist COMMA */
- 0, /* (174) sclp ::= */
- -4, /* (175) selcollist ::= sclp distinct expr as */
- -2, /* (176) selcollist ::= sclp STAR */
- -2, /* (177) as ::= AS ids */
- -1, /* (178) as ::= ids */
- 0, /* (179) as ::= */
- -1, /* (180) distinct ::= DISTINCT */
- 0, /* (181) distinct ::= */
- -2, /* (182) from ::= FROM tablelist */
- -2, /* (183) from ::= FROM sub */
- -3, /* (184) sub ::= LP union RP */
- -4, /* (185) sub ::= LP union RP ids */
- -6, /* (186) sub ::= sub COMMA LP union RP ids */
- -2, /* (187) tablelist ::= ids cpxName */
- -3, /* (188) tablelist ::= ids cpxName ids */
- -4, /* (189) tablelist ::= tablelist COMMA ids cpxName */
- -5, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */
- -1, /* (191) tmvar ::= VARIABLE */
- -4, /* (192) interval_option ::= intervalKey LP tmvar RP */
- -6, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
- 0, /* (194) interval_option ::= */
- -1, /* (195) intervalKey ::= INTERVAL */
- -1, /* (196) intervalKey ::= EVERY */
- 0, /* (197) session_option ::= */
- -7, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
- 0, /* (199) windowstate_option ::= */
- -4, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */
- 0, /* (201) fill_opt ::= */
- -6, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
- -4, /* (203) fill_opt ::= FILL LP ID RP */
- -4, /* (204) sliding_opt ::= SLIDING LP tmvar RP */
- 0, /* (205) sliding_opt ::= */
- 0, /* (206) orderby_opt ::= */
- -3, /* (207) orderby_opt ::= ORDER BY sortlist */
- -4, /* (208) sortlist ::= sortlist COMMA item sortorder */
- -2, /* (209) sortlist ::= item sortorder */
- -2, /* (210) item ::= ids cpxName */
- -1, /* (211) sortorder ::= ASC */
- -1, /* (212) sortorder ::= DESC */
- 0, /* (213) sortorder ::= */
- 0, /* (214) groupby_opt ::= */
- -3, /* (215) groupby_opt ::= GROUP BY grouplist */
- -3, /* (216) grouplist ::= grouplist COMMA item */
- -1, /* (217) grouplist ::= item */
- 0, /* (218) having_opt ::= */
- -2, /* (219) having_opt ::= HAVING expr */
- 0, /* (220) limit_opt ::= */
- -2, /* (221) limit_opt ::= LIMIT signed */
- -4, /* (222) limit_opt ::= LIMIT signed OFFSET signed */
- -4, /* (223) limit_opt ::= LIMIT signed COMMA signed */
- 0, /* (224) slimit_opt ::= */
- -2, /* (225) slimit_opt ::= SLIMIT signed */
- -4, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */
- -4, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */
- 0, /* (228) where_opt ::= */
- -2, /* (229) where_opt ::= WHERE expr */
- -3, /* (230) expr ::= LP expr RP */
- -1, /* (231) expr ::= ID */
- -3, /* (232) expr ::= ID DOT ID */
- -3, /* (233) expr ::= ID DOT STAR */
- -1, /* (234) expr ::= INTEGER */
- -2, /* (235) expr ::= MINUS INTEGER */
- -2, /* (236) expr ::= PLUS INTEGER */
- -1, /* (237) expr ::= FLOAT */
- -2, /* (238) expr ::= MINUS FLOAT */
- -2, /* (239) expr ::= PLUS FLOAT */
- -1, /* (240) expr ::= STRING */
- -1, /* (241) expr ::= NOW */
- -1, /* (242) expr ::= VARIABLE */
- -2, /* (243) expr ::= PLUS VARIABLE */
- -2, /* (244) expr ::= MINUS VARIABLE */
- -1, /* (245) expr ::= BOOL */
- -1, /* (246) expr ::= NULL */
- -4, /* (247) expr ::= ID LP exprlist RP */
- -4, /* (248) expr ::= ID LP STAR RP */
- -3, /* (249) expr ::= expr IS NULL */
- -4, /* (250) expr ::= expr IS NOT NULL */
- -3, /* (251) expr ::= expr LT expr */
- -3, /* (252) expr ::= expr GT expr */
- -3, /* (253) expr ::= expr LE expr */
- -3, /* (254) expr ::= expr GE expr */
- -3, /* (255) expr ::= expr NE expr */
- -3, /* (256) expr ::= expr EQ expr */
- -5, /* (257) expr ::= expr BETWEEN expr AND expr */
- -3, /* (258) expr ::= expr AND expr */
- -3, /* (259) expr ::= expr OR expr */
- -3, /* (260) expr ::= expr PLUS expr */
- -3, /* (261) expr ::= expr MINUS expr */
- -3, /* (262) expr ::= expr STAR expr */
- -3, /* (263) expr ::= expr SLASH expr */
- -3, /* (264) expr ::= expr REM expr */
- -3, /* (265) expr ::= expr LIKE expr */
- -3, /* (266) expr ::= expr MATCH expr */
- -3, /* (267) expr ::= expr NMATCH expr */
- -5, /* (268) expr ::= expr IN LP exprlist RP */
- -3, /* (269) exprlist ::= exprlist COMMA expritem */
- -1, /* (270) exprlist ::= expritem */
- -1, /* (271) expritem ::= expr */
- 0, /* (272) expritem ::= */
- -3, /* (273) cmd ::= RESET QUERY CACHE */
- -3, /* (274) cmd ::= SYNCDB ids REPLICA */
- -7, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
- -7, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
- -7, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
- -7, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
- -7, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
- -8, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
- -9, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
- -7, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
- -7, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
- -7, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
- -7, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
- -7, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
- -7, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
- -8, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
- -9, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
- -7, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
- -3, /* (291) cmd ::= KILL CONNECTION INTEGER */
- -5, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */
- -5, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */
+/* The following table contains information about every rule that
+** is used during the reduce.
+*/
+static const struct {
+ YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
+ signed char nrhs; /* Negative of the number of RHS symbols in the rule */
+} yyRuleInfo[] = {
+ { 199, -1 }, /* (0) program ::= cmd */
+ { 200, -2 }, /* (1) cmd ::= SHOW DATABASES */
+ { 200, -2 }, /* (2) cmd ::= SHOW TOPICS */
+ { 200, -2 }, /* (3) cmd ::= SHOW FUNCTIONS */
+ { 200, -2 }, /* (4) cmd ::= SHOW MNODES */
+ { 200, -2 }, /* (5) cmd ::= SHOW DNODES */
+ { 200, -2 }, /* (6) cmd ::= SHOW ACCOUNTS */
+ { 200, -2 }, /* (7) cmd ::= SHOW USERS */
+ { 200, -2 }, /* (8) cmd ::= SHOW MODULES */
+ { 200, -2 }, /* (9) cmd ::= SHOW QUERIES */
+ { 200, -2 }, /* (10) cmd ::= SHOW CONNECTIONS */
+ { 200, -2 }, /* (11) cmd ::= SHOW STREAMS */
+ { 200, -2 }, /* (12) cmd ::= SHOW VARIABLES */
+ { 200, -2 }, /* (13) cmd ::= SHOW SCORES */
+ { 200, -2 }, /* (14) cmd ::= SHOW GRANTS */
+ { 200, -2 }, /* (15) cmd ::= SHOW VNODES */
+ { 200, -3 }, /* (16) cmd ::= SHOW VNODES ids */
+ { 202, 0 }, /* (17) dbPrefix ::= */
+ { 202, -2 }, /* (18) dbPrefix ::= ids DOT */
+ { 203, 0 }, /* (19) cpxName ::= */
+ { 203, -2 }, /* (20) cpxName ::= DOT ids */
+ { 200, -5 }, /* (21) cmd ::= SHOW CREATE TABLE ids cpxName */
+ { 200, -5 }, /* (22) cmd ::= SHOW CREATE STABLE ids cpxName */
+ { 200, -4 }, /* (23) cmd ::= SHOW CREATE DATABASE ids */
+ { 200, -3 }, /* (24) cmd ::= SHOW dbPrefix TABLES */
+ { 200, -5 }, /* (25) cmd ::= SHOW dbPrefix TABLES LIKE ids */
+ { 200, -3 }, /* (26) cmd ::= SHOW dbPrefix STABLES */
+ { 200, -5 }, /* (27) cmd ::= SHOW dbPrefix STABLES LIKE ids */
+ { 200, -3 }, /* (28) cmd ::= SHOW dbPrefix VGROUPS */
+ { 200, -4 }, /* (29) cmd ::= SHOW dbPrefix VGROUPS ids */
+ { 200, -5 }, /* (30) cmd ::= DROP TABLE ifexists ids cpxName */
+ { 200, -5 }, /* (31) cmd ::= DROP STABLE ifexists ids cpxName */
+ { 200, -4 }, /* (32) cmd ::= DROP DATABASE ifexists ids */
+ { 200, -4 }, /* (33) cmd ::= DROP TOPIC ifexists ids */
+ { 200, -3 }, /* (34) cmd ::= DROP FUNCTION ids */
+ { 200, -3 }, /* (35) cmd ::= DROP DNODE ids */
+ { 200, -3 }, /* (36) cmd ::= DROP USER ids */
+ { 200, -3 }, /* (37) cmd ::= DROP ACCOUNT ids */
+ { 200, -2 }, /* (38) cmd ::= USE ids */
+ { 200, -3 }, /* (39) cmd ::= DESCRIBE ids cpxName */
+ { 200, -3 }, /* (40) cmd ::= DESC ids cpxName */
+ { 200, -5 }, /* (41) cmd ::= ALTER USER ids PASS ids */
+ { 200, -5 }, /* (42) cmd ::= ALTER USER ids PRIVILEGE ids */
+ { 200, -4 }, /* (43) cmd ::= ALTER DNODE ids ids */
+ { 200, -5 }, /* (44) cmd ::= ALTER DNODE ids ids ids */
+ { 200, -3 }, /* (45) cmd ::= ALTER LOCAL ids */
+ { 200, -4 }, /* (46) cmd ::= ALTER LOCAL ids ids */
+ { 200, -4 }, /* (47) cmd ::= ALTER DATABASE ids alter_db_optr */
+ { 200, -4 }, /* (48) cmd ::= ALTER TOPIC ids alter_topic_optr */
+ { 200, -4 }, /* (49) cmd ::= ALTER ACCOUNT ids acct_optr */
+ { 200, -6 }, /* (50) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
+ { 200, -6 }, /* (51) cmd ::= COMPACT VNODES IN LP exprlist RP */
+ { 201, -1 }, /* (52) ids ::= ID */
+ { 201, -1 }, /* (53) ids ::= STRING */
+ { 204, -2 }, /* (54) ifexists ::= IF EXISTS */
+ { 204, 0 }, /* (55) ifexists ::= */
+ { 209, -3 }, /* (56) ifnotexists ::= IF NOT EXISTS */
+ { 209, 0 }, /* (57) ifnotexists ::= */
+ { 200, -3 }, /* (58) cmd ::= CREATE DNODE ids */
+ { 200, -6 }, /* (59) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
+ { 200, -5 }, /* (60) cmd ::= CREATE DATABASE ifnotexists ids db_optr */
+ { 200, -5 }, /* (61) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */
+ { 200, -8 }, /* (62) cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ { 200, -9 }, /* (63) cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
+ { 200, -5 }, /* (64) cmd ::= CREATE USER ids PASS ids */
+ { 213, 0 }, /* (65) bufsize ::= */
+ { 213, -2 }, /* (66) bufsize ::= BUFSIZE INTEGER */
+ { 214, 0 }, /* (67) pps ::= */
+ { 214, -2 }, /* (68) pps ::= PPS INTEGER */
+ { 215, 0 }, /* (69) tseries ::= */
+ { 215, -2 }, /* (70) tseries ::= TSERIES INTEGER */
+ { 216, 0 }, /* (71) dbs ::= */
+ { 216, -2 }, /* (72) dbs ::= DBS INTEGER */
+ { 217, 0 }, /* (73) streams ::= */
+ { 217, -2 }, /* (74) streams ::= STREAMS INTEGER */
+ { 218, 0 }, /* (75) storage ::= */
+ { 218, -2 }, /* (76) storage ::= STORAGE INTEGER */
+ { 219, 0 }, /* (77) qtime ::= */
+ { 219, -2 }, /* (78) qtime ::= QTIME INTEGER */
+ { 220, 0 }, /* (79) users ::= */
+ { 220, -2 }, /* (80) users ::= USERS INTEGER */
+ { 221, 0 }, /* (81) conns ::= */
+ { 221, -2 }, /* (82) conns ::= CONNS INTEGER */
+ { 222, 0 }, /* (83) state ::= */
+ { 222, -2 }, /* (84) state ::= STATE ids */
+ { 207, -9 }, /* (85) acct_optr ::= pps tseries storage streams qtime dbs users conns state */
+ { 223, -3 }, /* (86) intitemlist ::= intitemlist COMMA intitem */
+ { 223, -1 }, /* (87) intitemlist ::= intitem */
+ { 224, -1 }, /* (88) intitem ::= INTEGER */
+ { 225, -2 }, /* (89) keep ::= KEEP intitemlist */
+ { 226, -2 }, /* (90) cache ::= CACHE INTEGER */
+ { 227, -2 }, /* (91) replica ::= REPLICA INTEGER */
+ { 228, -2 }, /* (92) quorum ::= QUORUM INTEGER */
+ { 229, -2 }, /* (93) days ::= DAYS INTEGER */
+ { 230, -2 }, /* (94) minrows ::= MINROWS INTEGER */
+ { 231, -2 }, /* (95) maxrows ::= MAXROWS INTEGER */
+ { 232, -2 }, /* (96) blocks ::= BLOCKS INTEGER */
+ { 233, -2 }, /* (97) ctime ::= CTIME INTEGER */
+ { 234, -2 }, /* (98) wal ::= WAL INTEGER */
+ { 235, -2 }, /* (99) fsync ::= FSYNC INTEGER */
+ { 236, -2 }, /* (100) comp ::= COMP INTEGER */
+ { 237, -2 }, /* (101) prec ::= PRECISION STRING */
+ { 238, -2 }, /* (102) update ::= UPDATE INTEGER */
+ { 239, -2 }, /* (103) cachelast ::= CACHELAST INTEGER */
+ { 240, -2 }, /* (104) partitions ::= PARTITIONS INTEGER */
+ { 210, 0 }, /* (105) db_optr ::= */
+ { 210, -2 }, /* (106) db_optr ::= db_optr cache */
+ { 210, -2 }, /* (107) db_optr ::= db_optr replica */
+ { 210, -2 }, /* (108) db_optr ::= db_optr quorum */
+ { 210, -2 }, /* (109) db_optr ::= db_optr days */
+ { 210, -2 }, /* (110) db_optr ::= db_optr minrows */
+ { 210, -2 }, /* (111) db_optr ::= db_optr maxrows */
+ { 210, -2 }, /* (112) db_optr ::= db_optr blocks */
+ { 210, -2 }, /* (113) db_optr ::= db_optr ctime */
+ { 210, -2 }, /* (114) db_optr ::= db_optr wal */
+ { 210, -2 }, /* (115) db_optr ::= db_optr fsync */
+ { 210, -2 }, /* (116) db_optr ::= db_optr comp */
+ { 210, -2 }, /* (117) db_optr ::= db_optr prec */
+ { 210, -2 }, /* (118) db_optr ::= db_optr keep */
+ { 210, -2 }, /* (119) db_optr ::= db_optr update */
+ { 210, -2 }, /* (120) db_optr ::= db_optr cachelast */
+ { 211, -1 }, /* (121) topic_optr ::= db_optr */
+ { 211, -2 }, /* (122) topic_optr ::= topic_optr partitions */
+ { 205, 0 }, /* (123) alter_db_optr ::= */
+ { 205, -2 }, /* (124) alter_db_optr ::= alter_db_optr replica */
+ { 205, -2 }, /* (125) alter_db_optr ::= alter_db_optr quorum */
+ { 205, -2 }, /* (126) alter_db_optr ::= alter_db_optr keep */
+ { 205, -2 }, /* (127) alter_db_optr ::= alter_db_optr blocks */
+ { 205, -2 }, /* (128) alter_db_optr ::= alter_db_optr comp */
+ { 205, -2 }, /* (129) alter_db_optr ::= alter_db_optr update */
+ { 205, -2 }, /* (130) alter_db_optr ::= alter_db_optr cachelast */
+ { 206, -1 }, /* (131) alter_topic_optr ::= alter_db_optr */
+ { 206, -2 }, /* (132) alter_topic_optr ::= alter_topic_optr partitions */
+ { 212, -1 }, /* (133) typename ::= ids */
+ { 212, -4 }, /* (134) typename ::= ids LP signed RP */
+ { 212, -2 }, /* (135) typename ::= ids UNSIGNED */
+ { 241, -1 }, /* (136) signed ::= INTEGER */
+ { 241, -2 }, /* (137) signed ::= PLUS INTEGER */
+ { 241, -2 }, /* (138) signed ::= MINUS INTEGER */
+ { 200, -3 }, /* (139) cmd ::= CREATE TABLE create_table_args */
+ { 200, -3 }, /* (140) cmd ::= CREATE TABLE create_stable_args */
+ { 200, -3 }, /* (141) cmd ::= CREATE STABLE create_stable_args */
+ { 200, -3 }, /* (142) cmd ::= CREATE TABLE create_table_list */
+ { 244, -1 }, /* (143) create_table_list ::= create_from_stable */
+ { 244, -2 }, /* (144) create_table_list ::= create_table_list create_from_stable */
+ { 242, -6 }, /* (145) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+ { 243, -10 }, /* (146) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+ { 245, -10 }, /* (147) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+ { 245, -13 }, /* (148) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
+ { 248, -3 }, /* (149) tagNamelist ::= tagNamelist COMMA ids */
+ { 248, -1 }, /* (150) tagNamelist ::= ids */
+ { 242, -5 }, /* (151) create_table_args ::= ifnotexists ids cpxName AS select */
+ { 246, -3 }, /* (152) columnlist ::= columnlist COMMA column */
+ { 246, -1 }, /* (153) columnlist ::= column */
+ { 250, -2 }, /* (154) column ::= ids typename */
+ { 247, -3 }, /* (155) tagitemlist ::= tagitemlist COMMA tagitem */
+ { 247, -1 }, /* (156) tagitemlist ::= tagitem */
+ { 251, -1 }, /* (157) tagitem ::= INTEGER */
+ { 251, -1 }, /* (158) tagitem ::= FLOAT */
+ { 251, -1 }, /* (159) tagitem ::= STRING */
+ { 251, -1 }, /* (160) tagitem ::= BOOL */
+ { 251, -1 }, /* (161) tagitem ::= NULL */
+ { 251, -1 }, /* (162) tagitem ::= NOW */
+ { 251, -2 }, /* (163) tagitem ::= MINUS INTEGER */
+ { 251, -2 }, /* (164) tagitem ::= MINUS FLOAT */
+ { 251, -2 }, /* (165) tagitem ::= PLUS INTEGER */
+ { 251, -2 }, /* (166) tagitem ::= PLUS FLOAT */
+ { 249, -14 }, /* (167) select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
+ { 249, -3 }, /* (168) select ::= LP select RP */
+ { 265, -1 }, /* (169) union ::= select */
+ { 265, -4 }, /* (170) union ::= union UNION ALL select */
+ { 200, -1 }, /* (171) cmd ::= union */
+ { 249, -2 }, /* (172) select ::= SELECT selcollist */
+ { 266, -2 }, /* (173) sclp ::= selcollist COMMA */
+ { 266, 0 }, /* (174) sclp ::= */
+ { 252, -4 }, /* (175) selcollist ::= sclp distinct expr as */
+ { 252, -2 }, /* (176) selcollist ::= sclp STAR */
+ { 269, -2 }, /* (177) as ::= AS ids */
+ { 269, -1 }, /* (178) as ::= ids */
+ { 269, 0 }, /* (179) as ::= */
+ { 267, -1 }, /* (180) distinct ::= DISTINCT */
+ { 267, 0 }, /* (181) distinct ::= */
+ { 253, -2 }, /* (182) from ::= FROM tablelist */
+ { 253, -2 }, /* (183) from ::= FROM sub */
+ { 271, -3 }, /* (184) sub ::= LP union RP */
+ { 271, -4 }, /* (185) sub ::= LP union RP ids */
+ { 271, -6 }, /* (186) sub ::= sub COMMA LP union RP ids */
+ { 270, -2 }, /* (187) tablelist ::= ids cpxName */
+ { 270, -3 }, /* (188) tablelist ::= ids cpxName ids */
+ { 270, -4 }, /* (189) tablelist ::= tablelist COMMA ids cpxName */
+ { 270, -5 }, /* (190) tablelist ::= tablelist COMMA ids cpxName ids */
+ { 272, -1 }, /* (191) tmvar ::= VARIABLE */
+ { 255, -4 }, /* (192) interval_option ::= intervalKey LP tmvar RP */
+ { 255, -6 }, /* (193) interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
+ { 255, 0 }, /* (194) interval_option ::= */
+ { 273, -1 }, /* (195) intervalKey ::= INTERVAL */
+ { 273, -1 }, /* (196) intervalKey ::= EVERY */
+ { 257, 0 }, /* (197) session_option ::= */
+ { 257, -7 }, /* (198) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
+ { 258, 0 }, /* (199) windowstate_option ::= */
+ { 258, -4 }, /* (200) windowstate_option ::= STATE_WINDOW LP ids RP */
+ { 259, 0 }, /* (201) fill_opt ::= */
+ { 259, -6 }, /* (202) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ { 259, -4 }, /* (203) fill_opt ::= FILL LP ID RP */
+ { 256, -4 }, /* (204) sliding_opt ::= SLIDING LP tmvar RP */
+ { 256, 0 }, /* (205) sliding_opt ::= */
+ { 262, 0 }, /* (206) orderby_opt ::= */
+ { 262, -3 }, /* (207) orderby_opt ::= ORDER BY sortlist */
+ { 274, -4 }, /* (208) sortlist ::= sortlist COMMA item sortorder */
+ { 274, -2 }, /* (209) sortlist ::= item sortorder */
+ { 276, -2 }, /* (210) item ::= ids cpxName */
+ { 277, -1 }, /* (211) sortorder ::= ASC */
+ { 277, -1 }, /* (212) sortorder ::= DESC */
+ { 277, 0 }, /* (213) sortorder ::= */
+ { 260, 0 }, /* (214) groupby_opt ::= */
+ { 260, -3 }, /* (215) groupby_opt ::= GROUP BY grouplist */
+ { 278, -3 }, /* (216) grouplist ::= grouplist COMMA item */
+ { 278, -1 }, /* (217) grouplist ::= item */
+ { 261, 0 }, /* (218) having_opt ::= */
+ { 261, -2 }, /* (219) having_opt ::= HAVING expr */
+ { 264, 0 }, /* (220) limit_opt ::= */
+ { 264, -2 }, /* (221) limit_opt ::= LIMIT signed */
+ { 264, -4 }, /* (222) limit_opt ::= LIMIT signed OFFSET signed */
+ { 264, -4 }, /* (223) limit_opt ::= LIMIT signed COMMA signed */
+ { 263, 0 }, /* (224) slimit_opt ::= */
+ { 263, -2 }, /* (225) slimit_opt ::= SLIMIT signed */
+ { 263, -4 }, /* (226) slimit_opt ::= SLIMIT signed SOFFSET signed */
+ { 263, -4 }, /* (227) slimit_opt ::= SLIMIT signed COMMA signed */
+ { 254, 0 }, /* (228) where_opt ::= */
+ { 254, -2 }, /* (229) where_opt ::= WHERE expr */
+ { 268, -3 }, /* (230) expr ::= LP expr RP */
+ { 268, -1 }, /* (231) expr ::= ID */
+ { 268, -3 }, /* (232) expr ::= ID DOT ID */
+ { 268, -3 }, /* (233) expr ::= ID DOT STAR */
+ { 268, -1 }, /* (234) expr ::= INTEGER */
+ { 268, -2 }, /* (235) expr ::= MINUS INTEGER */
+ { 268, -2 }, /* (236) expr ::= PLUS INTEGER */
+ { 268, -1 }, /* (237) expr ::= FLOAT */
+ { 268, -2 }, /* (238) expr ::= MINUS FLOAT */
+ { 268, -2 }, /* (239) expr ::= PLUS FLOAT */
+ { 268, -1 }, /* (240) expr ::= STRING */
+ { 268, -1 }, /* (241) expr ::= NOW */
+ { 268, -1 }, /* (242) expr ::= VARIABLE */
+ { 268, -2 }, /* (243) expr ::= PLUS VARIABLE */
+ { 268, -2 }, /* (244) expr ::= MINUS VARIABLE */
+ { 268, -1 }, /* (245) expr ::= BOOL */
+ { 268, -1 }, /* (246) expr ::= NULL */
+ { 268, -4 }, /* (247) expr ::= ID LP exprlist RP */
+ { 268, -4 }, /* (248) expr ::= ID LP STAR RP */
+ { 268, -3 }, /* (249) expr ::= expr IS NULL */
+ { 268, -4 }, /* (250) expr ::= expr IS NOT NULL */
+ { 268, -3 }, /* (251) expr ::= expr LT expr */
+ { 268, -3 }, /* (252) expr ::= expr GT expr */
+ { 268, -3 }, /* (253) expr ::= expr LE expr */
+ { 268, -3 }, /* (254) expr ::= expr GE expr */
+ { 268, -3 }, /* (255) expr ::= expr NE expr */
+ { 268, -3 }, /* (256) expr ::= expr EQ expr */
+ { 268, -5 }, /* (257) expr ::= expr BETWEEN expr AND expr */
+ { 268, -3 }, /* (258) expr ::= expr AND expr */
+ { 268, -3 }, /* (259) expr ::= expr OR expr */
+ { 268, -3 }, /* (260) expr ::= expr PLUS expr */
+ { 268, -3 }, /* (261) expr ::= expr MINUS expr */
+ { 268, -3 }, /* (262) expr ::= expr STAR expr */
+ { 268, -3 }, /* (263) expr ::= expr SLASH expr */
+ { 268, -3 }, /* (264) expr ::= expr REM expr */
+ { 268, -3 }, /* (265) expr ::= expr LIKE expr */
+ { 268, -3 }, /* (266) expr ::= expr MATCH expr */
+ { 268, -3 }, /* (267) expr ::= expr NMATCH expr */
+ { 268, -5 }, /* (268) expr ::= expr IN LP exprlist RP */
+ { 208, -3 }, /* (269) exprlist ::= exprlist COMMA expritem */
+ { 208, -1 }, /* (270) exprlist ::= expritem */
+ { 279, -1 }, /* (271) expritem ::= expr */
+ { 279, 0 }, /* (272) expritem ::= */
+ { 200, -3 }, /* (273) cmd ::= RESET QUERY CACHE */
+ { 200, -3 }, /* (274) cmd ::= SYNCDB ids REPLICA */
+ { 200, -7 }, /* (275) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ { 200, -7 }, /* (276) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ { 200, -7 }, /* (277) cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
+ { 200, -7 }, /* (278) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ { 200, -7 }, /* (279) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ { 200, -8 }, /* (280) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ { 200, -9 }, /* (281) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ { 200, -7 }, /* (282) cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
+ { 200, -7 }, /* (283) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
+ { 200, -7 }, /* (284) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */
+ { 200, -7 }, /* (285) cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
+ { 200, -7 }, /* (286) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
+ { 200, -7 }, /* (287) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */
+ { 200, -8 }, /* (288) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */
+ { 200, -9 }, /* (289) cmd ::= ALTER STABLE ids cpxName SET TAG ids EQ tagitem */
+ { 200, -7 }, /* (290) cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
+ { 200, -3 }, /* (291) cmd ::= KILL CONNECTION INTEGER */
+ { 200, -5 }, /* (292) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ { 200, -5 }, /* (293) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -2458,34 +2151,30 @@ static void yy_accept(yyParser*); /* Forward Declaration */
** only called from one place, optimizing compilers will in-line it, which
** means that the extra parameters have no performance impact.
*/
-static YYACTIONTYPE yy_reduce(
+static void yy_reduce(
yyParser *yypParser, /* The parser */
unsigned int yyruleno, /* Number of the rule by which to reduce */
int yyLookahead, /* Lookahead token, or YYNOCODE if none */
ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */
- ParseCTX_PDECL /* %extra_context */
){
int yygoto; /* The next state */
- YYACTIONTYPE yyact; /* The next action */
+ int yyact; /* The next action */
yyStackEntry *yymsp; /* The top of the parser's stack */
int yysize; /* Amount to pop the stack */
- ParseARG_FETCH
+ ParseARG_FETCH;
(void)yyLookahead;
(void)yyLookaheadToken;
yymsp = yypParser->yytos;
#ifndef NDEBUG
if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){
- yysize = yyRuleInfoNRhs[yyruleno];
+ yysize = yyRuleInfo[yyruleno].nrhs;
if( yysize ){
- fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
+ fprintf(yyTraceFILE, "%sReduce %d [%s], go to state %d.\n",
yyTracePrompt,
- yyruleno, yyRuleName[yyruleno],
- yyrulenoyytos - yypParser->yystack)>yypParser->yyhwm ){
yypParser->yyhwm++;
@@ -2503,19 +2192,13 @@ static YYACTIONTYPE yy_reduce(
#if YYSTACKDEPTH>0
if( yypParser->yytos>=yypParser->yystackEnd ){
yyStackOverflow(yypParser);
- /* The call to yyStackOverflow() above pops the stack until it is
- ** empty, causing the main parser loop to exit. So the return value
- ** is never used and does not matter. */
- return 0;
+ return;
}
#else
if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
if( yyGrowStack(yypParser) ){
yyStackOverflow(yypParser);
- /* The call to yyStackOverflow() above pops the stack until it is
- ** empty, causing the main parser loop to exit. So the return value
- ** is never used and does not matter. */
- return 0;
+ return;
}
yymsp = yypParser->yytos;
}
@@ -2713,16 +2396,16 @@ static YYACTIONTYPE yy_reduce(
break;
case 47: /* cmd ::= ALTER DATABASE ids alter_db_optr */
case 48: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ yytestcase(yyruleno==48);
-{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy90, &t);}
+{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy142, &t);}
break;
case 49: /* cmd ::= ALTER ACCOUNT ids acct_optr */
-{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy171);}
+{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy491);}
break;
case 50: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */
-{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy171);}
+{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy491);}
break;
case 51: /* cmd ::= COMPACT VNODES IN LP exprlist RP */
-{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy421);}
+{ setCompactVnodeSql(pInfo, TSDB_SQL_COMPACT_VNODE, yymsp[-1].minor.yy221);}
break;
case 52: /* ids ::= ID */
case 53: /* ids ::= STRING */ yytestcase(yyruleno==53);
@@ -2744,17 +2427,17 @@ static YYACTIONTYPE yy_reduce(
{ setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);}
break;
case 59: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */
-{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy171);}
+{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy491);}
break;
case 60: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */
case 61: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==61);
-{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy90, &yymsp[-2].minor.yy0);}
+{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy142, &yymsp[-2].minor.yy0);}
break;
case 62: /* cmd ::= CREATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
-{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy183, &yymsp[0].minor.yy0, 1);}
+{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy503, &yymsp[0].minor.yy0, 1);}
break;
case 63: /* cmd ::= CREATE AGGREGATE FUNCTION ids AS ids OUTPUTTYPE typename bufsize */
-{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy183, &yymsp[0].minor.yy0, 2);}
+{ setCreateFuncInfo(pInfo, TSDB_SQL_CREATE_FUNCTION, &yymsp[-5].minor.yy0, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy503, &yymsp[0].minor.yy0, 2);}
break;
case 64: /* cmd ::= CREATE USER ids PASS ids */
{ setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);}
@@ -2785,38 +2468,38 @@ static YYACTIONTYPE yy_reduce(
break;
case 85: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */
{
- yylhsminor.yy171.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1;
- yylhsminor.yy171.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1;
- yylhsminor.yy171.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1;
- yylhsminor.yy171.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1;
- yylhsminor.yy171.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1;
- yylhsminor.yy171.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1;
- yylhsminor.yy171.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1;
- yylhsminor.yy171.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1;
- yylhsminor.yy171.stat = yymsp[0].minor.yy0;
+ yylhsminor.yy491.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1;
+ yylhsminor.yy491.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1;
+ yylhsminor.yy491.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1;
+ yylhsminor.yy491.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1;
+ yylhsminor.yy491.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1;
+ yylhsminor.yy491.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1;
+ yylhsminor.yy491.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1;
+ yylhsminor.yy491.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1;
+ yylhsminor.yy491.stat = yymsp[0].minor.yy0;
}
- yymsp[-8].minor.yy171 = yylhsminor.yy171;
+ yymsp[-8].minor.yy491 = yylhsminor.yy491;
break;
case 86: /* intitemlist ::= intitemlist COMMA intitem */
case 155: /* tagitemlist ::= tagitemlist COMMA tagitem */ yytestcase(yyruleno==155);
-{ yylhsminor.yy421 = tVariantListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy430, -1); }
- yymsp[-2].minor.yy421 = yylhsminor.yy421;
+{ yylhsminor.yy221 = tVariantListAppend(yymsp[-2].minor.yy221, &yymsp[0].minor.yy106, -1); }
+ yymsp[-2].minor.yy221 = yylhsminor.yy221;
break;
case 87: /* intitemlist ::= intitem */
case 156: /* tagitemlist ::= tagitem */ yytestcase(yyruleno==156);
-{ yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[0].minor.yy430, -1); }
- yymsp[0].minor.yy421 = yylhsminor.yy421;
+{ yylhsminor.yy221 = tVariantListAppend(NULL, &yymsp[0].minor.yy106, -1); }
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 88: /* intitem ::= INTEGER */
case 157: /* tagitem ::= INTEGER */ yytestcase(yyruleno==157);
case 158: /* tagitem ::= FLOAT */ yytestcase(yyruleno==158);
case 159: /* tagitem ::= STRING */ yytestcase(yyruleno==159);
case 160: /* tagitem ::= BOOL */ yytestcase(yyruleno==160);
-{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy430 = yylhsminor.yy430;
+{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy106, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy106 = yylhsminor.yy106;
break;
case 89: /* keep ::= KEEP intitemlist */
-{ yymsp[-1].minor.yy421 = yymsp[0].minor.yy421; }
+{ yymsp[-1].minor.yy221 = yymsp[0].minor.yy221; }
break;
case 90: /* cache ::= CACHE INTEGER */
case 91: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==91);
@@ -2836,221 +2519,221 @@ static YYACTIONTYPE yy_reduce(
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
break;
case 105: /* db_optr ::= */
-{setDefaultCreateDbOption(&yymsp[1].minor.yy90); yymsp[1].minor.yy90.dbType = TSDB_DB_TYPE_DEFAULT;}
+{setDefaultCreateDbOption(&yymsp[1].minor.yy142); yymsp[1].minor.yy142.dbType = TSDB_DB_TYPE_DEFAULT;}
break;
case 106: /* db_optr ::= db_optr cache */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 107: /* db_optr ::= db_optr replica */
case 124: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==124);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 108: /* db_optr ::= db_optr quorum */
case 125: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==125);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 109: /* db_optr ::= db_optr days */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 110: /* db_optr ::= db_optr minrows */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 111: /* db_optr ::= db_optr maxrows */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 112: /* db_optr ::= db_optr blocks */
case 127: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==127);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 113: /* db_optr ::= db_optr ctime */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 114: /* db_optr ::= db_optr wal */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 115: /* db_optr ::= db_optr fsync */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 116: /* db_optr ::= db_optr comp */
case 128: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==128);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 117: /* db_optr ::= db_optr prec */
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.precision = yymsp[0].minor.yy0; }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.precision = yymsp[0].minor.yy0; }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 118: /* db_optr ::= db_optr keep */
case 126: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==126);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.keep = yymsp[0].minor.yy421; }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.keep = yymsp[0].minor.yy221; }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 119: /* db_optr ::= db_optr update */
case 129: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==129);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 120: /* db_optr ::= db_optr cachelast */
case 130: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==130);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 121: /* topic_optr ::= db_optr */
case 131: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==131);
-{ yylhsminor.yy90 = yymsp[0].minor.yy90; yylhsminor.yy90.dbType = TSDB_DB_TYPE_TOPIC; }
- yymsp[0].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[0].minor.yy142; yylhsminor.yy142.dbType = TSDB_DB_TYPE_TOPIC; }
+ yymsp[0].minor.yy142 = yylhsminor.yy142;
break;
case 122: /* topic_optr ::= topic_optr partitions */
case 132: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==132);
-{ yylhsminor.yy90 = yymsp[-1].minor.yy90; yylhsminor.yy90.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[-1].minor.yy90 = yylhsminor.yy90;
+{ yylhsminor.yy142 = yymsp[-1].minor.yy142; yylhsminor.yy142.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[-1].minor.yy142 = yylhsminor.yy142;
break;
case 123: /* alter_db_optr ::= */
-{ setDefaultCreateDbOption(&yymsp[1].minor.yy90); yymsp[1].minor.yy90.dbType = TSDB_DB_TYPE_DEFAULT;}
+{ setDefaultCreateDbOption(&yymsp[1].minor.yy142); yymsp[1].minor.yy142.dbType = TSDB_DB_TYPE_DEFAULT;}
break;
case 133: /* typename ::= ids */
{
yymsp[0].minor.yy0.type = 0;
- tSetColumnType (&yylhsminor.yy183, &yymsp[0].minor.yy0);
+ tSetColumnType (&yylhsminor.yy503, &yymsp[0].minor.yy0);
}
- yymsp[0].minor.yy183 = yylhsminor.yy183;
+ yymsp[0].minor.yy503 = yylhsminor.yy503;
break;
case 134: /* typename ::= ids LP signed RP */
{
- if (yymsp[-1].minor.yy325 <= 0) {
+ if (yymsp[-1].minor.yy109 <= 0) {
yymsp[-3].minor.yy0.type = 0;
- tSetColumnType(&yylhsminor.yy183, &yymsp[-3].minor.yy0);
+ tSetColumnType(&yylhsminor.yy503, &yymsp[-3].minor.yy0);
} else {
- yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy325; // negative value of name length
- tSetColumnType(&yylhsminor.yy183, &yymsp[-3].minor.yy0);
+ yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy109; // negative value of name length
+ tSetColumnType(&yylhsminor.yy503, &yymsp[-3].minor.yy0);
}
}
- yymsp[-3].minor.yy183 = yylhsminor.yy183;
+ yymsp[-3].minor.yy503 = yylhsminor.yy503;
break;
case 135: /* typename ::= ids UNSIGNED */
{
yymsp[-1].minor.yy0.type = 0;
yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z);
- tSetColumnType (&yylhsminor.yy183, &yymsp[-1].minor.yy0);
+ tSetColumnType (&yylhsminor.yy503, &yymsp[-1].minor.yy0);
}
- yymsp[-1].minor.yy183 = yylhsminor.yy183;
+ yymsp[-1].minor.yy503 = yylhsminor.yy503;
break;
case 136: /* signed ::= INTEGER */
-{ yylhsminor.yy325 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
- yymsp[0].minor.yy325 = yylhsminor.yy325;
+{ yylhsminor.yy109 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+ yymsp[0].minor.yy109 = yylhsminor.yy109;
break;
case 137: /* signed ::= PLUS INTEGER */
-{ yymsp[-1].minor.yy325 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
+{ yymsp[-1].minor.yy109 = strtol(yymsp[0].minor.yy0.z, NULL, 10); }
break;
case 138: /* signed ::= MINUS INTEGER */
-{ yymsp[-1].minor.yy325 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);}
+{ yymsp[-1].minor.yy109 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);}
break;
case 142: /* cmd ::= CREATE TABLE create_table_list */
-{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy438;}
+{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy102;}
break;
case 143: /* create_table_list ::= create_from_stable */
{
SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql));
pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo));
- taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy152);
+ taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy416);
pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE;
- yylhsminor.yy438 = pCreateTable;
+ yylhsminor.yy102 = pCreateTable;
}
- yymsp[0].minor.yy438 = yylhsminor.yy438;
+ yymsp[0].minor.yy102 = yylhsminor.yy102;
break;
case 144: /* create_table_list ::= create_table_list create_from_stable */
{
- taosArrayPush(yymsp[-1].minor.yy438->childTableInfo, &yymsp[0].minor.yy152);
- yylhsminor.yy438 = yymsp[-1].minor.yy438;
+ taosArrayPush(yymsp[-1].minor.yy102->childTableInfo, &yymsp[0].minor.yy416);
+ yylhsminor.yy102 = yymsp[-1].minor.yy102;
}
- yymsp[-1].minor.yy438 = yylhsminor.yy438;
+ yymsp[-1].minor.yy102 = yylhsminor.yy102;
break;
case 145: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
{
- yylhsminor.yy438 = tSetCreateTableInfo(yymsp[-1].minor.yy421, NULL, NULL, TSQL_CREATE_TABLE);
- setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE);
+ yylhsminor.yy102 = tSetCreateTableInfo(yymsp[-1].minor.yy221, NULL, NULL, TSQL_CREATE_TABLE);
+ setSqlInfo(pInfo, yylhsminor.yy102, NULL, TSDB_SQL_CREATE_TABLE);
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0);
}
- yymsp[-5].minor.yy438 = yylhsminor.yy438;
+ yymsp[-5].minor.yy102 = yylhsminor.yy102;
break;
case 146: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
{
- yylhsminor.yy438 = tSetCreateTableInfo(yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, NULL, TSQL_CREATE_STABLE);
- setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE);
+ yylhsminor.yy102 = tSetCreateTableInfo(yymsp[-5].minor.yy221, yymsp[-1].minor.yy221, NULL, TSQL_CREATE_STABLE);
+ setSqlInfo(pInfo, yylhsminor.yy102, NULL, TSDB_SQL_CREATE_TABLE);
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0);
}
- yymsp[-9].minor.yy438 = yylhsminor.yy438;
+ yymsp[-9].minor.yy102 = yylhsminor.yy102;
break;
case 147: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
{
yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n;
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
- yylhsminor.yy152 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy421, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0);
+ yylhsminor.yy416 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy221, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0);
}
- yymsp[-9].minor.yy152 = yylhsminor.yy152;
+ yymsp[-9].minor.yy416 = yylhsminor.yy416;
break;
case 148: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */
{
yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n;
yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n;
- yylhsminor.yy152 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy421, yymsp[-1].minor.yy421, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0);
+ yylhsminor.yy416 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy221, yymsp[-1].minor.yy221, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0);
}
- yymsp[-12].minor.yy152 = yylhsminor.yy152;
+ yymsp[-12].minor.yy416 = yylhsminor.yy416;
break;
case 149: /* tagNamelist ::= tagNamelist COMMA ids */
-{taosArrayPush(yymsp[-2].minor.yy421, &yymsp[0].minor.yy0); yylhsminor.yy421 = yymsp[-2].minor.yy421; }
- yymsp[-2].minor.yy421 = yylhsminor.yy421;
+{taosArrayPush(yymsp[-2].minor.yy221, &yymsp[0].minor.yy0); yylhsminor.yy221 = yymsp[-2].minor.yy221; }
+ yymsp[-2].minor.yy221 = yylhsminor.yy221;
break;
case 150: /* tagNamelist ::= ids */
-{yylhsminor.yy421 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy421, &yymsp[0].minor.yy0);}
- yymsp[0].minor.yy421 = yylhsminor.yy421;
+{yylhsminor.yy221 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy221, &yymsp[0].minor.yy0);}
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 151: /* create_table_args ::= ifnotexists ids cpxName AS select */
{
- yylhsminor.yy438 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy56, TSQL_CREATE_STREAM);
- setSqlInfo(pInfo, yylhsminor.yy438, NULL, TSDB_SQL_CREATE_TABLE);
+ yylhsminor.yy102 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy376, TSQL_CREATE_STREAM);
+ setSqlInfo(pInfo, yylhsminor.yy102, NULL, TSDB_SQL_CREATE_TABLE);
yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n;
setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0);
}
- yymsp[-4].minor.yy438 = yylhsminor.yy438;
+ yymsp[-4].minor.yy102 = yylhsminor.yy102;
break;
case 152: /* columnlist ::= columnlist COMMA column */
-{taosArrayPush(yymsp[-2].minor.yy421, &yymsp[0].minor.yy183); yylhsminor.yy421 = yymsp[-2].minor.yy421; }
- yymsp[-2].minor.yy421 = yylhsminor.yy421;
+{taosArrayPush(yymsp[-2].minor.yy221, &yymsp[0].minor.yy503); yylhsminor.yy221 = yymsp[-2].minor.yy221; }
+ yymsp[-2].minor.yy221 = yylhsminor.yy221;
break;
case 153: /* columnlist ::= column */
-{yylhsminor.yy421 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy421, &yymsp[0].minor.yy183);}
- yymsp[0].minor.yy421 = yylhsminor.yy421;
+{yylhsminor.yy221 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy221, &yymsp[0].minor.yy503);}
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 154: /* column ::= ids typename */
{
- tSetColumnInfo(&yylhsminor.yy183, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy183);
+ tSetColumnInfo(&yylhsminor.yy503, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy503);
}
- yymsp[-1].minor.yy183 = yylhsminor.yy183;
+ yymsp[-1].minor.yy503 = yylhsminor.yy503;
break;
case 161: /* tagitem ::= NULL */
-{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy430 = yylhsminor.yy430;
+{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy106, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy106 = yylhsminor.yy106;
break;
case 162: /* tagitem ::= NOW */
-{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy430, &yymsp[0].minor.yy0);}
- yymsp[0].minor.yy430 = yylhsminor.yy430;
+{ yymsp[0].minor.yy0.type = TSDB_DATA_TYPE_TIMESTAMP; tVariantCreate(&yylhsminor.yy106, &yymsp[0].minor.yy0);}
+ yymsp[0].minor.yy106 = yylhsminor.yy106;
break;
case 163: /* tagitem ::= MINUS INTEGER */
case 164: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==164);
@@ -3060,56 +2743,56 @@ static YYACTIONTYPE yy_reduce(
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type;
toTSDBType(yymsp[-1].minor.yy0.type);
- tVariantCreate(&yylhsminor.yy430, &yymsp[-1].minor.yy0);
+ tVariantCreate(&yylhsminor.yy106, &yymsp[-1].minor.yy0);
}
- yymsp[-1].minor.yy430 = yylhsminor.yy430;
+ yymsp[-1].minor.yy106 = yylhsminor.yy106;
break;
case 167: /* select ::= SELECT selcollist from where_opt interval_option sliding_opt session_option windowstate_option fill_opt groupby_opt having_opt orderby_opt slimit_opt limit_opt */
{
- yylhsminor.yy56 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy421, yymsp[-11].minor.yy8, yymsp[-10].minor.yy439, yymsp[-4].minor.yy421, yymsp[-2].minor.yy421, &yymsp[-9].minor.yy400, &yymsp[-7].minor.yy147, &yymsp[-6].minor.yy40, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy421, &yymsp[0].minor.yy166, &yymsp[-1].minor.yy166, yymsp[-3].minor.yy439);
+ yylhsminor.yy376 = tSetQuerySqlNode(&yymsp[-13].minor.yy0, yymsp[-12].minor.yy221, yymsp[-11].minor.yy164, yymsp[-10].minor.yy146, yymsp[-4].minor.yy221, yymsp[-2].minor.yy221, &yymsp[-9].minor.yy280, &yymsp[-7].minor.yy139, &yymsp[-6].minor.yy48, &yymsp[-8].minor.yy0, yymsp[-5].minor.yy221, &yymsp[0].minor.yy454, &yymsp[-1].minor.yy454, yymsp[-3].minor.yy146);
}
- yymsp[-13].minor.yy56 = yylhsminor.yy56;
+ yymsp[-13].minor.yy376 = yylhsminor.yy376;
break;
case 168: /* select ::= LP select RP */
-{yymsp[-2].minor.yy56 = yymsp[-1].minor.yy56;}
+{yymsp[-2].minor.yy376 = yymsp[-1].minor.yy376;}
break;
case 169: /* union ::= select */
-{ yylhsminor.yy421 = setSubclause(NULL, yymsp[0].minor.yy56); }
- yymsp[0].minor.yy421 = yylhsminor.yy421;
+{ yylhsminor.yy221 = setSubclause(NULL, yymsp[0].minor.yy376); }
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 170: /* union ::= union UNION ALL select */
-{ yylhsminor.yy421 = appendSelectClause(yymsp[-3].minor.yy421, yymsp[0].minor.yy56); }
- yymsp[-3].minor.yy421 = yylhsminor.yy421;
+{ yylhsminor.yy221 = appendSelectClause(yymsp[-3].minor.yy221, yymsp[0].minor.yy376); }
+ yymsp[-3].minor.yy221 = yylhsminor.yy221;
break;
case 171: /* cmd ::= union */
-{ setSqlInfo(pInfo, yymsp[0].minor.yy421, NULL, TSDB_SQL_SELECT); }
+{ setSqlInfo(pInfo, yymsp[0].minor.yy221, NULL, TSDB_SQL_SELECT); }
break;
case 172: /* select ::= SELECT selcollist */
{
- yylhsminor.yy56 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy421, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ yylhsminor.yy376 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy221, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
- yymsp[-1].minor.yy56 = yylhsminor.yy56;
+ yymsp[-1].minor.yy376 = yylhsminor.yy376;
break;
case 173: /* sclp ::= selcollist COMMA */
-{yylhsminor.yy421 = yymsp[-1].minor.yy421;}
- yymsp[-1].minor.yy421 = yylhsminor.yy421;
+{yylhsminor.yy221 = yymsp[-1].minor.yy221;}
+ yymsp[-1].minor.yy221 = yylhsminor.yy221;
break;
case 174: /* sclp ::= */
case 206: /* orderby_opt ::= */ yytestcase(yyruleno==206);
-{yymsp[1].minor.yy421 = 0;}
+{yymsp[1].minor.yy221 = 0;}
break;
case 175: /* selcollist ::= sclp distinct expr as */
{
- yylhsminor.yy421 = tSqlExprListAppend(yymsp[-3].minor.yy421, yymsp[-1].minor.yy439, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0);
+ yylhsminor.yy221 = tSqlExprListAppend(yymsp[-3].minor.yy221, yymsp[-1].minor.yy146, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0);
}
- yymsp[-3].minor.yy421 = yylhsminor.yy421;
+ yymsp[-3].minor.yy221 = yylhsminor.yy221;
break;
case 176: /* selcollist ::= sclp STAR */
{
- tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL);
- yylhsminor.yy421 = tSqlExprListAppend(yymsp[-1].minor.yy421, pNode, 0, 0);
+ tSqlExpr *pNode = tSqlExprCreateIdValue(pInfo, NULL, TK_ALL);
+ yylhsminor.yy221 = tSqlExprListAppend(yymsp[-1].minor.yy221, pNode, 0, 0);
}
- yymsp[-1].minor.yy421 = yylhsminor.yy421;
+ yymsp[-1].minor.yy221 = yylhsminor.yy221;
break;
case 177: /* as ::= AS ids */
{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; }
@@ -3127,85 +2810,85 @@ static YYACTIONTYPE yy_reduce(
break;
case 182: /* from ::= FROM tablelist */
case 183: /* from ::= FROM sub */ yytestcase(yyruleno==183);
-{yymsp[-1].minor.yy8 = yymsp[0].minor.yy8;}
+{yymsp[-1].minor.yy164 = yymsp[0].minor.yy164;}
break;
case 184: /* sub ::= LP union RP */
-{yymsp[-2].minor.yy8 = addSubqueryElem(NULL, yymsp[-1].minor.yy421, NULL);}
+{yymsp[-2].minor.yy164 = addSubqueryElem(NULL, yymsp[-1].minor.yy221, NULL);}
break;
case 185: /* sub ::= LP union RP ids */
-{yymsp[-3].minor.yy8 = addSubqueryElem(NULL, yymsp[-2].minor.yy421, &yymsp[0].minor.yy0);}
+{yymsp[-3].minor.yy164 = addSubqueryElem(NULL, yymsp[-2].minor.yy221, &yymsp[0].minor.yy0);}
break;
case 186: /* sub ::= sub COMMA LP union RP ids */
-{yylhsminor.yy8 = addSubqueryElem(yymsp[-5].minor.yy8, yymsp[-2].minor.yy421, &yymsp[0].minor.yy0);}
- yymsp[-5].minor.yy8 = yylhsminor.yy8;
+{yylhsminor.yy164 = addSubqueryElem(yymsp[-5].minor.yy164, yymsp[-2].minor.yy221, &yymsp[0].minor.yy0);}
+ yymsp[-5].minor.yy164 = yylhsminor.yy164;
break;
case 187: /* tablelist ::= ids cpxName */
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
- yylhsminor.yy8 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL);
+ yylhsminor.yy164 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL);
}
- yymsp[-1].minor.yy8 = yylhsminor.yy8;
+ yymsp[-1].minor.yy164 = yylhsminor.yy164;
break;
case 188: /* tablelist ::= ids cpxName ids */
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
- yylhsminor.yy8 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
+ yylhsminor.yy164 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
- yymsp[-2].minor.yy8 = yylhsminor.yy8;
+ yymsp[-2].minor.yy164 = yylhsminor.yy164;
break;
case 189: /* tablelist ::= tablelist COMMA ids cpxName */
{
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
- yylhsminor.yy8 = setTableNameList(yymsp[-3].minor.yy8, &yymsp[-1].minor.yy0, NULL);
+ yylhsminor.yy164 = setTableNameList(yymsp[-3].minor.yy164, &yymsp[-1].minor.yy0, NULL);
}
- yymsp[-3].minor.yy8 = yylhsminor.yy8;
+ yymsp[-3].minor.yy164 = yylhsminor.yy164;
break;
case 190: /* tablelist ::= tablelist COMMA ids cpxName ids */
{
yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n;
- yylhsminor.yy8 = setTableNameList(yymsp[-4].minor.yy8, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
+ yylhsminor.yy164 = setTableNameList(yymsp[-4].minor.yy164, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
- yymsp[-4].minor.yy8 = yylhsminor.yy8;
+ yymsp[-4].minor.yy164 = yylhsminor.yy164;
break;
case 191: /* tmvar ::= VARIABLE */
{yylhsminor.yy0 = yymsp[0].minor.yy0;}
yymsp[0].minor.yy0 = yylhsminor.yy0;
break;
case 192: /* interval_option ::= intervalKey LP tmvar RP */
-{yylhsminor.yy400.interval = yymsp[-1].minor.yy0; yylhsminor.yy400.offset.n = 0; yylhsminor.yy400.token = yymsp[-3].minor.yy104;}
- yymsp[-3].minor.yy400 = yylhsminor.yy400;
+{yylhsminor.yy280.interval = yymsp[-1].minor.yy0; yylhsminor.yy280.offset.n = 0; yylhsminor.yy280.token = yymsp[-3].minor.yy340;}
+ yymsp[-3].minor.yy280 = yylhsminor.yy280;
break;
case 193: /* interval_option ::= intervalKey LP tmvar COMMA tmvar RP */
-{yylhsminor.yy400.interval = yymsp[-3].minor.yy0; yylhsminor.yy400.offset = yymsp[-1].minor.yy0; yylhsminor.yy400.token = yymsp[-5].minor.yy104;}
- yymsp[-5].minor.yy400 = yylhsminor.yy400;
+{yylhsminor.yy280.interval = yymsp[-3].minor.yy0; yylhsminor.yy280.offset = yymsp[-1].minor.yy0; yylhsminor.yy280.token = yymsp[-5].minor.yy340;}
+ yymsp[-5].minor.yy280 = yylhsminor.yy280;
break;
case 194: /* interval_option ::= */
-{memset(&yymsp[1].minor.yy400, 0, sizeof(yymsp[1].minor.yy400));}
+{memset(&yymsp[1].minor.yy280, 0, sizeof(yymsp[1].minor.yy280));}
break;
case 195: /* intervalKey ::= INTERVAL */
-{yymsp[0].minor.yy104 = TK_INTERVAL;}
+{yymsp[0].minor.yy340 = TK_INTERVAL;}
break;
case 196: /* intervalKey ::= EVERY */
-{yymsp[0].minor.yy104 = TK_EVERY; }
+{yymsp[0].minor.yy340 = TK_EVERY; }
break;
case 197: /* session_option ::= */
-{yymsp[1].minor.yy147.col.n = 0; yymsp[1].minor.yy147.gap.n = 0;}
+{yymsp[1].minor.yy139.col.n = 0; yymsp[1].minor.yy139.gap.n = 0;}
break;
case 198: /* session_option ::= SESSION LP ids cpxName COMMA tmvar RP */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- yymsp[-6].minor.yy147.col = yymsp[-4].minor.yy0;
- yymsp[-6].minor.yy147.gap = yymsp[-1].minor.yy0;
+ yymsp[-6].minor.yy139.col = yymsp[-4].minor.yy0;
+ yymsp[-6].minor.yy139.gap = yymsp[-1].minor.yy0;
}
break;
case 199: /* windowstate_option ::= */
-{ yymsp[1].minor.yy40.col.n = 0; yymsp[1].minor.yy40.col.z = NULL;}
+{ yymsp[1].minor.yy48.col.n = 0; yymsp[1].minor.yy48.col.z = NULL;}
break;
case 200: /* windowstate_option ::= STATE_WINDOW LP ids RP */
-{ yymsp[-3].minor.yy40.col = yymsp[-1].minor.yy0; }
+{ yymsp[-3].minor.yy48.col = yymsp[-1].minor.yy0; }
break;
case 201: /* fill_opt ::= */
-{ yymsp[1].minor.yy421 = 0; }
+{ yymsp[1].minor.yy221 = 0; }
break;
case 202: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */
{
@@ -3213,14 +2896,14 @@ static YYACTIONTYPE yy_reduce(
toTSDBType(yymsp[-3].minor.yy0.type);
tVariantCreate(&A, &yymsp[-3].minor.yy0);
- tVariantListInsert(yymsp[-1].minor.yy421, &A, -1, 0);
- yymsp[-5].minor.yy421 = yymsp[-1].minor.yy421;
+ tVariantListInsert(yymsp[-1].minor.yy221, &A, -1, 0);
+ yymsp[-5].minor.yy221 = yymsp[-1].minor.yy221;
}
break;
case 203: /* fill_opt ::= FILL LP ID RP */
{
toTSDBType(yymsp[-1].minor.yy0.type);
- yymsp[-3].minor.yy421 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);
+ yymsp[-3].minor.yy221 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1);
}
break;
case 204: /* sliding_opt ::= SLIDING LP tmvar RP */
@@ -3230,243 +2913,243 @@ static YYACTIONTYPE yy_reduce(
{yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; }
break;
case 207: /* orderby_opt ::= ORDER BY sortlist */
-{yymsp[-2].minor.yy421 = yymsp[0].minor.yy421;}
+{yymsp[-2].minor.yy221 = yymsp[0].minor.yy221;}
break;
case 208: /* sortlist ::= sortlist COMMA item sortorder */
{
- yylhsminor.yy421 = tVariantListAppend(yymsp[-3].minor.yy421, &yymsp[-1].minor.yy430, yymsp[0].minor.yy96);
+ yylhsminor.yy221 = tVariantListAppend(yymsp[-3].minor.yy221, &yymsp[-1].minor.yy106, yymsp[0].minor.yy172);
}
- yymsp[-3].minor.yy421 = yylhsminor.yy421;
+ yymsp[-3].minor.yy221 = yylhsminor.yy221;
break;
case 209: /* sortlist ::= item sortorder */
{
- yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[-1].minor.yy430, yymsp[0].minor.yy96);
+ yylhsminor.yy221 = tVariantListAppend(NULL, &yymsp[-1].minor.yy106, yymsp[0].minor.yy172);
}
- yymsp[-1].minor.yy421 = yylhsminor.yy421;
+ yymsp[-1].minor.yy221 = yylhsminor.yy221;
break;
case 210: /* item ::= ids cpxName */
{
toTSDBType(yymsp[-1].minor.yy0.type);
yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n;
- tVariantCreate(&yylhsminor.yy430, &yymsp[-1].minor.yy0);
+ tVariantCreate(&yylhsminor.yy106, &yymsp[-1].minor.yy0);
}
- yymsp[-1].minor.yy430 = yylhsminor.yy430;
+ yymsp[-1].minor.yy106 = yylhsminor.yy106;
break;
case 211: /* sortorder ::= ASC */
-{ yymsp[0].minor.yy96 = TSDB_ORDER_ASC; }
+{ yymsp[0].minor.yy172 = TSDB_ORDER_ASC; }
break;
case 212: /* sortorder ::= DESC */
-{ yymsp[0].minor.yy96 = TSDB_ORDER_DESC;}
+{ yymsp[0].minor.yy172 = TSDB_ORDER_DESC;}
break;
case 213: /* sortorder ::= */
-{ yymsp[1].minor.yy96 = TSDB_ORDER_ASC; }
+{ yymsp[1].minor.yy172 = TSDB_ORDER_ASC; }
break;
case 214: /* groupby_opt ::= */
-{ yymsp[1].minor.yy421 = 0;}
+{ yymsp[1].minor.yy221 = 0;}
break;
case 215: /* groupby_opt ::= GROUP BY grouplist */
-{ yymsp[-2].minor.yy421 = yymsp[0].minor.yy421;}
+{ yymsp[-2].minor.yy221 = yymsp[0].minor.yy221;}
break;
case 216: /* grouplist ::= grouplist COMMA item */
{
- yylhsminor.yy421 = tVariantListAppend(yymsp[-2].minor.yy421, &yymsp[0].minor.yy430, -1);
+ yylhsminor.yy221 = tVariantListAppend(yymsp[-2].minor.yy221, &yymsp[0].minor.yy106, -1);
}
- yymsp[-2].minor.yy421 = yylhsminor.yy421;
+ yymsp[-2].minor.yy221 = yylhsminor.yy221;
break;
case 217: /* grouplist ::= item */
{
- yylhsminor.yy421 = tVariantListAppend(NULL, &yymsp[0].minor.yy430, -1);
+ yylhsminor.yy221 = tVariantListAppend(NULL, &yymsp[0].minor.yy106, -1);
}
- yymsp[0].minor.yy421 = yylhsminor.yy421;
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 218: /* having_opt ::= */
case 228: /* where_opt ::= */ yytestcase(yyruleno==228);
case 272: /* expritem ::= */ yytestcase(yyruleno==272);
-{yymsp[1].minor.yy439 = 0;}
+{yymsp[1].minor.yy146 = 0;}
break;
case 219: /* having_opt ::= HAVING expr */
case 229: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==229);
-{yymsp[-1].minor.yy439 = yymsp[0].minor.yy439;}
+{yymsp[-1].minor.yy146 = yymsp[0].minor.yy146;}
break;
case 220: /* limit_opt ::= */
case 224: /* slimit_opt ::= */ yytestcase(yyruleno==224);
-{yymsp[1].minor.yy166.limit = -1; yymsp[1].minor.yy166.offset = 0;}
+{yymsp[1].minor.yy454.limit = -1; yymsp[1].minor.yy454.offset = 0;}
break;
case 221: /* limit_opt ::= LIMIT signed */
case 225: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==225);
-{yymsp[-1].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-1].minor.yy166.offset = 0;}
+{yymsp[-1].minor.yy454.limit = yymsp[0].minor.yy109; yymsp[-1].minor.yy454.offset = 0;}
break;
case 222: /* limit_opt ::= LIMIT signed OFFSET signed */
-{ yymsp[-3].minor.yy166.limit = yymsp[-2].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[0].minor.yy325;}
+{ yymsp[-3].minor.yy454.limit = yymsp[-2].minor.yy109; yymsp[-3].minor.yy454.offset = yymsp[0].minor.yy109;}
break;
case 223: /* limit_opt ::= LIMIT signed COMMA signed */
-{ yymsp[-3].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[-2].minor.yy325;}
+{ yymsp[-3].minor.yy454.limit = yymsp[0].minor.yy109; yymsp[-3].minor.yy454.offset = yymsp[-2].minor.yy109;}
break;
case 226: /* slimit_opt ::= SLIMIT signed SOFFSET signed */
-{yymsp[-3].minor.yy166.limit = yymsp[-2].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[0].minor.yy325;}
+{yymsp[-3].minor.yy454.limit = yymsp[-2].minor.yy109; yymsp[-3].minor.yy454.offset = yymsp[0].minor.yy109;}
break;
case 227: /* slimit_opt ::= SLIMIT signed COMMA signed */
-{yymsp[-3].minor.yy166.limit = yymsp[0].minor.yy325; yymsp[-3].minor.yy166.offset = yymsp[-2].minor.yy325;}
+{yymsp[-3].minor.yy454.limit = yymsp[0].minor.yy109; yymsp[-3].minor.yy454.offset = yymsp[-2].minor.yy109;}
break;
case 230: /* expr ::= LP expr RP */
-{yylhsminor.yy439 = yymsp[-1].minor.yy439; yylhsminor.yy439->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy439->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = yymsp[-1].minor.yy146; yylhsminor.yy146->exprToken.z = yymsp[-2].minor.yy0.z; yylhsminor.yy146->exprToken.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 231: /* expr ::= ID */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_ID);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 232: /* expr ::= ID DOT ID */
-{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ID);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 233: /* expr ::= ID DOT STAR */
-{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[-2].minor.yy0, TK_ALL);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 234: /* expr ::= INTEGER */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_INTEGER);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 235: /* expr ::= MINUS INTEGER */
case 236: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==236);
-{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);}
- yymsp[-1].minor.yy439 = yylhsminor.yy439;
+{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_INTEGER);}
+ yymsp[-1].minor.yy146 = yylhsminor.yy146;
break;
case 237: /* expr ::= FLOAT */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_FLOAT);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 238: /* expr ::= MINUS FLOAT */
case 239: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==239);
-{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);}
- yymsp[-1].minor.yy439 = yylhsminor.yy439;
+{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_FLOAT);}
+ yymsp[-1].minor.yy146 = yylhsminor.yy146;
break;
case 240: /* expr ::= STRING */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_STRING);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 241: /* expr ::= NOW */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); }
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NOW); }
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 242: /* expr ::= VARIABLE */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_VARIABLE);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 243: /* expr ::= PLUS VARIABLE */
case 244: /* expr ::= MINUS VARIABLE */ yytestcase(yyruleno==244);
-{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_VARIABLE);}
- yymsp[-1].minor.yy439 = yylhsminor.yy439;
+{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_VARIABLE; yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[-1].minor.yy0, TK_VARIABLE);}
+ yymsp[-1].minor.yy146 = yylhsminor.yy146;
break;
case 245: /* expr ::= BOOL */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_BOOL);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 246: /* expr ::= NULL */
-{ yylhsminor.yy439 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{ yylhsminor.yy146 = tSqlExprCreateIdValue(pInfo, &yymsp[0].minor.yy0, TK_NULL);}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 247: /* expr ::= ID LP exprlist RP */
-{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy439 = tSqlExprCreateFunction(yymsp[-1].minor.yy421, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
- yymsp[-3].minor.yy439 = yylhsminor.yy439;
+{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy146 = tSqlExprCreateFunction(yymsp[-1].minor.yy221, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
+ yymsp[-3].minor.yy146 = yylhsminor.yy146;
break;
case 248: /* expr ::= ID LP STAR RP */
-{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy439 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
- yymsp[-3].minor.yy439 = yylhsminor.yy439;
+{ tStrTokenAppend(pInfo->funcs, &yymsp[-3].minor.yy0); yylhsminor.yy146 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); }
+ yymsp[-3].minor.yy146 = yylhsminor.yy146;
break;
case 249: /* expr ::= expr IS NULL */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, NULL, TK_ISNULL);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, NULL, TK_ISNULL);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 250: /* expr ::= expr IS NOT NULL */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-3].minor.yy439, NULL, TK_NOTNULL);}
- yymsp[-3].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-3].minor.yy146, NULL, TK_NOTNULL);}
+ yymsp[-3].minor.yy146 = yylhsminor.yy146;
break;
case 251: /* expr ::= expr LT expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LT);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_LT);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 252: /* expr ::= expr GT expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_GT);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_GT);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 253: /* expr ::= expr LE expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LE);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_LE);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 254: /* expr ::= expr GE expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_GE);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_GE);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 255: /* expr ::= expr NE expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_NE);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_NE);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 256: /* expr ::= expr EQ expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_EQ);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_EQ);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 257: /* expr ::= expr BETWEEN expr AND expr */
-{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy439); yylhsminor.yy439 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy439, yymsp[-2].minor.yy439, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy439, TK_LE), TK_AND);}
- yymsp[-4].minor.yy439 = yylhsminor.yy439;
+{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy146); yylhsminor.yy146 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy146, yymsp[-2].minor.yy146, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy146, TK_LE), TK_AND);}
+ yymsp[-4].minor.yy146 = yylhsminor.yy146;
break;
case 258: /* expr ::= expr AND expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_AND);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_AND);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 259: /* expr ::= expr OR expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_OR); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_OR); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 260: /* expr ::= expr PLUS expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_PLUS); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_PLUS); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 261: /* expr ::= expr MINUS expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_MINUS); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_MINUS); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 262: /* expr ::= expr STAR expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_STAR); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_STAR); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 263: /* expr ::= expr SLASH expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_DIVIDE);}
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_DIVIDE);}
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 264: /* expr ::= expr REM expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_REM); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_REM); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 265: /* expr ::= expr LIKE expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_LIKE); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_LIKE); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 266: /* expr ::= expr MATCH expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_MATCH); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_MATCH); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 267: /* expr ::= expr NMATCH expr */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-2].minor.yy439, yymsp[0].minor.yy439, TK_NMATCH); }
- yymsp[-2].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-2].minor.yy146, yymsp[0].minor.yy146, TK_NMATCH); }
+ yymsp[-2].minor.yy146 = yylhsminor.yy146;
break;
case 268: /* expr ::= expr IN LP exprlist RP */
-{yylhsminor.yy439 = tSqlExprCreate(yymsp[-4].minor.yy439, (tSqlExpr*)yymsp[-1].minor.yy421, TK_IN); }
- yymsp[-4].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = tSqlExprCreate(yymsp[-4].minor.yy146, (tSqlExpr*)yymsp[-1].minor.yy221, TK_IN); }
+ yymsp[-4].minor.yy146 = yylhsminor.yy146;
break;
case 269: /* exprlist ::= exprlist COMMA expritem */
-{yylhsminor.yy421 = tSqlExprListAppend(yymsp[-2].minor.yy421,yymsp[0].minor.yy439,0, 0);}
- yymsp[-2].minor.yy421 = yylhsminor.yy421;
+{yylhsminor.yy221 = tSqlExprListAppend(yymsp[-2].minor.yy221,yymsp[0].minor.yy146,0, 0);}
+ yymsp[-2].minor.yy221 = yylhsminor.yy221;
break;
case 270: /* exprlist ::= expritem */
-{yylhsminor.yy421 = tSqlExprListAppend(0,yymsp[0].minor.yy439,0, 0);}
- yymsp[0].minor.yy421 = yylhsminor.yy421;
+{yylhsminor.yy221 = tSqlExprListAppend(0,yymsp[0].minor.yy146,0, 0);}
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 271: /* expritem ::= expr */
-{yylhsminor.yy439 = yymsp[0].minor.yy439;}
- yymsp[0].minor.yy439 = yylhsminor.yy439;
+{yylhsminor.yy146 = yymsp[0].minor.yy146;}
+ yymsp[0].minor.yy146 = yylhsminor.yy146;
break;
case 273: /* cmd ::= RESET QUERY CACHE */
{ setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
@@ -3477,7 +3160,7 @@ static YYACTIONTYPE yy_reduce(
case 275: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
@@ -3495,14 +3178,14 @@ static YYACTIONTYPE yy_reduce(
case 277: /* cmd ::= ALTER TABLE ids cpxName MODIFY COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
case 278: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
@@ -3537,7 +3220,7 @@ static YYACTIONTYPE yy_reduce(
toTSDBType(yymsp[-2].minor.yy0.type);
SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1);
- A = tVariantListAppend(A, &yymsp[0].minor.yy430, -1);
+ A = tVariantListAppend(A, &yymsp[0].minor.yy106, -1);
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
@@ -3546,14 +3229,14 @@ static YYACTIONTYPE yy_reduce(
case 282: /* cmd ::= ALTER TABLE ids cpxName MODIFY TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, -1);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
case 283: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
@@ -3571,14 +3254,14 @@ static YYACTIONTYPE yy_reduce(
case 285: /* cmd ::= ALTER STABLE ids cpxName MODIFY COLUMN columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_CHANGE_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
case 286: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
@@ -3613,7 +3296,7 @@ static YYACTIONTYPE yy_reduce(
toTSDBType(yymsp[-2].minor.yy0.type);
SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1);
- A = tVariantListAppend(A, &yymsp[0].minor.yy430, -1);
+ A = tVariantListAppend(A, &yymsp[0].minor.yy106, -1);
SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
@@ -3622,7 +3305,7 @@ static YYACTIONTYPE yy_reduce(
case 290: /* cmd ::= ALTER STABLE ids cpxName MODIFY TAG columnlist */
{
yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n;
- SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy421, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
+ SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy221, NULL, TSDB_ALTER_TABLE_MODIFY_TAG_COLUMN, TSDB_SUPER_TABLE);
setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE);
}
break;
@@ -3639,9 +3322,9 @@ static YYACTIONTYPE yy_reduce(
break;
/********** End reduce actions ************************************************/
};
- assert( yyrulenostateno = (YYACTIONTYPE)yyact;
yymsp->major = (YYCODETYPE)yygoto;
yyTraceShift(yypParser, yyact, "... then shift");
- return yyact;
}
/*
@@ -3666,8 +3348,7 @@ static YYACTIONTYPE yy_reduce(
static void yy_parse_failed(
yyParser *yypParser /* The parser */
){
- ParseARG_FETCH
- ParseCTX_FETCH
+ ParseARG_FETCH;
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
@@ -3678,8 +3359,7 @@ static void yy_parse_failed(
** parser fails */
/************ Begin %parse_failure code ***************************************/
/************ End %parse_failure code *****************************************/
- ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
- ParseCTX_STORE
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}
#endif /* YYNOERRORRECOVERY */
@@ -3691,8 +3371,7 @@ static void yy_syntax_error(
int yymajor, /* The major type of the error token */
ParseTOKENTYPE yyminor /* The minor type of the error token */
){
- ParseARG_FETCH
- ParseCTX_FETCH
+ ParseARG_FETCH;
#define TOKEN yyminor
/************ Begin %syntax_error code ****************************************/
@@ -3718,8 +3397,7 @@ static void yy_syntax_error(
assert(len <= outputBufLen);
/************ End %syntax_error code ******************************************/
- ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
- ParseCTX_STORE
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}
/*
@@ -3728,8 +3406,7 @@ static void yy_syntax_error(
static void yy_accept(
yyParser *yypParser /* The parser */
){
- ParseARG_FETCH
- ParseCTX_FETCH
+ ParseARG_FETCH;
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
@@ -3744,8 +3421,7 @@ static void yy_accept(
/*********** Begin %parse_accept code *****************************************/
/*********** End %parse_accept code *******************************************/
- ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
- ParseCTX_STORE
+ ParseARG_STORE; /* Suppress warning about unused %extra_argument variable */
}
/* The main parser program.
@@ -3774,47 +3450,45 @@ void Parse(
ParseARG_PDECL /* Optional %extra_argument parameter */
){
YYMINORTYPE yyminorunion;
- YYACTIONTYPE yyact; /* The parser action. */
+ unsigned int yyact; /* The parser action. */
#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
int yyendofinput; /* True if we are at the end of input */
#endif
#ifdef YYERRORSYMBOL
int yyerrorhit = 0; /* True if yymajor has invoked an error */
#endif
- yyParser *yypParser = (yyParser*)yyp; /* The parser */
- ParseCTX_FETCH
- ParseARG_STORE
+ yyParser *yypParser; /* The parser */
+ yypParser = (yyParser*)yyp;
assert( yypParser->yytos!=0 );
#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
yyendofinput = (yymajor==0);
#endif
+ ParseARG_STORE;
- yyact = yypParser->yytos->stateno;
#ifndef NDEBUG
if( yyTraceFILE ){
- if( yyact < YY_MIN_REDUCE ){
+ int stateno = yypParser->yytos->stateno;
+ if( stateno < YY_MIN_REDUCE ){
fprintf(yyTraceFILE,"%sInput '%s' in state %d\n",
- yyTracePrompt,yyTokenName[yymajor],yyact);
+ yyTracePrompt,yyTokenName[yymajor],stateno);
}else{
fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n",
- yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE);
+ yyTracePrompt,yyTokenName[yymajor],stateno-YY_MIN_REDUCE);
}
}
#endif
do{
- assert( yyact==yypParser->yytos->stateno );
- yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
+ yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor);
if( yyact >= YY_MIN_REDUCE ){
- yyact = yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,
- yyminor ParseCTX_PARAM);
+ yy_reduce(yypParser,yyact-YY_MIN_REDUCE,yymajor,yyminor);
}else if( yyact <= YY_MAX_SHIFTREDUCE ){
- yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
+ yy_shift(yypParser,yyact,yymajor,yyminor);
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt--;
#endif
- break;
+ yymajor = YYNOCODE;
}else if( yyact==YY_ACCEPT_ACTION ){
yypParser->yytos--;
yy_accept(yypParser);
@@ -3865,9 +3539,10 @@ void Parse(
yymajor = YYNOCODE;
}else{
while( yypParser->yytos >= yypParser->yystack
+ && yymx != YYERRORSYMBOL
&& (yyact = yy_find_reduce_action(
yypParser->yytos->stateno,
- YYERRORSYMBOL)) > YY_MAX_SHIFTREDUCE
+ YYERRORSYMBOL)) >= YY_MIN_REDUCE
){
yy_pop_parser_stack(yypParser);
}
@@ -3884,8 +3559,6 @@ void Parse(
}
yypParser->yyerrcnt = 3;
yyerrorhit = 1;
- if( yymajor==YYNOCODE ) break;
- yyact = yypParser->yytos->stateno;
#elif defined(YYNOERRORRECOVERY)
/* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
** do any kind of error recovery. Instead, simply invoke the syntax
@@ -3896,7 +3569,8 @@ void Parse(
*/
yy_syntax_error(yypParser,yymajor, yyminor);
yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
- break;
+ yymajor = YYNOCODE;
+
#else /* YYERRORSYMBOL is not defined */
/* This is what we do if the grammar does not define ERROR:
**
@@ -3918,10 +3592,10 @@ void Parse(
yypParser->yyerrcnt = -1;
#endif
}
- break;
+ yymajor = YYNOCODE;
#endif
}
- }while( yypParser->yytos>yypParser->yystack );
+ }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack );
#ifndef NDEBUG
if( yyTraceFILE ){
yyStackEntry *i;
@@ -3936,17 +3610,3 @@ void Parse(
#endif
return;
}
-
-/*
-** Return the fallback token corresponding to canonical token iToken, or
-** 0 if iToken has no fallback.
-*/
-int ParseFallback(int iToken){
-#ifdef YYFALLBACK
- assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) );
- return yyFallback[iToken];
-#else
- (void)iToken;
- return 0;
-#endif
-}
diff --git a/src/query/src/tdigest.c b/src/query/src/tdigest.c
index 109fd7574f04a7f82e92f112551ca9494c7e667a..4870d1ff60d2cd7db69a01587d5e48515bdf67d7 100644
--- a/src/query/src/tdigest.c
+++ b/src/query/src/tdigest.c
@@ -296,7 +296,7 @@ double tdigestQuantile(TDigest *t, double q) {
a = b;
right = t->max;
- if (idx < weight_so_far + a->weight) {
+ if (idx < weight_so_far + a->weight && a->weight != 0) {
double p = (idx - weight_so_far) / a->weight;
return left * (1 - p) + right * p;
}
diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c
index c86ab8549974712658ad3d381c4141427c000762..623d6e3cc0c20ef3c69b4ebfb6752616c1ff56b0 100644
--- a/src/sync/src/syncRetrieve.c
+++ b/src/sync/src/syncRetrieve.c
@@ -233,7 +233,7 @@ static int64_t syncProcessLastWal(SSyncPeer *pPeer, char *wname, int64_t index)
int32_t once = 0; // last WAL has once ever been processed
int64_t offset = 0;
uint64_t fversion = 0;
- char fname[TSDB_FILENAME_LEN * 2] = {0}; // full path to wal file
+ char fname[TSDB_FILENAME_LEN * 3] = {0}; // full path to wal file
// get full path to wal file
snprintf(fname, sizeof(fname), "%s/%s", pNode->path, wname);
diff --git a/src/tsdb/inc/tsdbMeta.h b/src/tsdb/inc/tsdbMeta.h
index 63b05df3e032bafe5706d1ce18a9c4d1810442b1..0b7af561cda8d9c37201f99c7ab467b4e1598d37 100644
--- a/src/tsdb/inc/tsdbMeta.h
+++ b/src/tsdb/inc/tsdbMeta.h
@@ -41,6 +41,7 @@ typedef struct STable {
int16_t restoreColumnNum;
bool hasRestoreLastColumn;
int lastColSVersion;
+ int16_t cacheLastConfigVersion;
T_REF_DECLARE()
} STable;
diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h
index 80e92975799f47d68ff72ef80a52efb6fe901b5e..a7f08e61ed1ac576c1ca70c0da20e14f4b3a306f 100644
--- a/src/tsdb/inc/tsdbint.h
+++ b/src/tsdb/inc/tsdbint.h
@@ -79,8 +79,8 @@ struct STsdbRepo {
STsdbCfg save_config; // save apply config
bool config_changed; // config changed flag
pthread_mutex_t save_mutex; // protect save config
-
- uint8_t hasCachedLastColumn;
+
+ int16_t cacheLastConfigVersion;
STsdbAppH appH;
STsdbStat stat;
@@ -111,7 +111,8 @@ int tsdbUnlockRepo(STsdbRepo* pRepo);
STsdbMeta* tsdbGetMeta(STsdbRepo* pRepo);
int tsdbCheckCommit(STsdbRepo* pRepo);
int tsdbRestoreInfo(STsdbRepo* pRepo);
-int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg);
+UNUSED_FUNC int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg);
+int32_t tsdbLoadLastCache(STsdbRepo *pRepo, STable* pTable);
void tsdbGetRootDir(int repoid, char dirName[]);
void tsdbGetDataDir(int repoid, char dirName[]);
diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c
index eccdcbdbf6155ca37dc35bc171189de3781e8642..7ac37a72b44483ed2e3b25bbdbb40cd00c1958c8 100644
--- a/src/tsdb/src/tsdbCommit.c
+++ b/src/tsdb/src/tsdbCommit.c
@@ -229,7 +229,7 @@ int tsdbWriteBlockIdx(SDFile *pHeadf, SArray *pIdxA, void **ppBuf) {
SBlockIdx *pBlkIdx;
size_t nidx = taosArrayGetSize(pIdxA);
int tlen = 0, size;
- int64_t offset;
+ int64_t offset = 0;
if (nidx <= 0) {
// All data are deleted
@@ -1186,7 +1186,7 @@ int tsdbWriteBlockImpl(STsdbRepo *pRepo, STable *pTable, SDFile *pDFile, SDFile
return -1;
}
- uint32_t aggrStatus = ((nColsNotAllNull > 0) && (rowsToWrite > 8)) ? 1 : 0; // TODO: How to make the decision?
+ uint32_t aggrStatus = nColsNotAllNull > 0 ? 1 : 0;
if (aggrStatus > 0) {
taosCalcChecksumAppend(0, (uint8_t *)pAggrBlkData, tsizeAggr);
@@ -1468,7 +1468,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
- pTarget->maxPoints);
+ pTarget->maxPoints, 0);
}
pTarget->numOfRows++;
@@ -1480,7 +1480,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
- tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
+ tdAppendMemRowToDataCol(row, pSchema, pTarget, true, 0);
tSkipListIterNext(pCommitIter->pIter);
} else {
@@ -1489,7 +1489,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
- pTarget->maxPoints);
+ pTarget->maxPoints, 0);
}
if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++;
@@ -1502,7 +1502,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
- tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
+ tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE,
+ update != TD_ROW_PARTIAL_UPDATE ? 0 : -1);
}
(*iter)++;
tSkipListIterNext(pCommitIter->pIter);
diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c
index 59fb4f334d3006eb7e8807ce193d61905f2322d2..dccb85af55b8f76739f16621a0959c29fb373b4a 100644
--- a/src/tsdb/src/tsdbCommitQueue.c
+++ b/src/tsdb/src/tsdbCommitQueue.c
@@ -146,7 +146,9 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) {
if (oldCfg.cacheLastRow != pRepo->config.cacheLastRow) {
if (tsdbLockRepo(pRepo) < 0) return;
- tsdbCacheLastData(pRepo, &oldCfg);
+ // tsdbCacheLastData(pRepo, &oldCfg);
+ // lazy load last cache when query or update
+ ++pRepo->cacheLastConfigVersion;
tsdbUnlockRepo(pRepo);
}
diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c
index 3b5e8ce56dab297c5b6cc4a9b07d8150445917b9..cefbf09cd0649cb1c994c1e660ee2a85730d3f58 100644
--- a/src/tsdb/src/tsdbCompact.c
+++ b/src/tsdb/src/tsdbCompact.c
@@ -67,7 +67,9 @@ int tsdbCompact(STsdbRepo *pRepo) { return tsdbAsyncCompact(pRepo); }
void *tsdbCompactImpl(STsdbRepo *pRepo) {
// Check if there are files in TSDB FS to compact
if (REPO_FS(pRepo)->cstatus->pmf == NULL) {
- tsdbInfo("vgId:%d no file to compact in FS", REPO_ID(pRepo));
+ pRepo->compactState = TSDB_NO_COMPACT;
+ tsem_post(&(pRepo->readyToCommit));
+ tsdbInfo("vgId:%d compact over, no file to compact in FS", REPO_ID(pRepo));
return NULL;
}
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index 23835d659d3157b789f61bd7e50f21861d863433..dfa4b74b7a5720398f9fc748078a0be6d870dda7 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -185,19 +185,10 @@ int tsdbUnlockRepo(STsdbRepo *pRepo) {
return 0;
}
-bool tsdbIsNeedCommit(STsdbRepo *pRepo) {
- int nVal = 0;
- if (sem_getvalue(&pRepo->readyToCommit, &nVal) != 0) {
- tsdbError("vgId:%d failed to sem_getvalue of readyToCommit", REPO_ID(pRepo));
- return false;
- }
- return nVal > 0;
-}
-
int tsdbCheckWal(STsdbRepo *pRepo, uint32_t walSize) { // MB
STsdbCfg *pCfg = &(pRepo->config);
if ((walSize > tsdbWalFlushSize) && (walSize > (pCfg->totalBlocks / 2 * pCfg->cacheBlockSize))) {
- if (tsdbIsNeedCommit(pRepo) && (tsdbAsyncCommit(pRepo) < 0)) return -1;
+ if (tsdbAsyncCommit(pRepo) < 0) return -1;
}
return 0;
}
@@ -211,7 +202,7 @@ int tsdbCheckCommit(STsdbRepo *pRepo) {
if ((pRepo->mem->extraBuffList != NULL) ||
((listNEles(pRepo->mem->bufBlockList) >= pCfg->totalBlocks / 3) && (pBufBlock->remain < TSDB_BUFFER_RESERVE))) {
// trigger commit
- if (tsdbIsNeedCommit(pRepo) && (tsdbAsyncCommit(pRepo) < 0)) return -1;
+ if (tsdbAsyncCommit(pRepo) < 0) return -1;
}
return 0;
}
@@ -585,7 +576,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
return NULL;
}
pRepo->config_changed = false;
- atomic_store_8(&pRepo->hasCachedLastColumn, 0);
+ pRepo->cacheLastConfigVersion = 0;
code = tsem_init(&(pRepo->readyToCommit), 0, 1);
if (code != 0) {
@@ -735,7 +726,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
loadStatisData = true;
}
}
-
+ TSDB_WLOCK_TABLE(pTable); // lock when update pTable->lastCols[]
for (int16_t i = 0; i < numColumns && numColumns > pTable->restoreColumnNum; ++i) {
STColumn *pCol = schemaColAt(pSchema, i);
// ignore loaded columns
@@ -784,6 +775,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
break;
}
}
+ TSDB_WUNLOCK_TABLE(pTable);
}
out:
@@ -812,20 +804,33 @@ static int tsdbRestoreLastRow(STsdbRepo *pRepo, STable *pTable, SReadH* pReadh,
// Get the data in row
STSchema *pSchema = tsdbGetTableSchema(pTable);
- pTable->lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
- if (pTable->lastRow == NULL) {
+ SMemRow lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
+ if (lastRow == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
- memRowSetType(pTable->lastRow, SMEM_ROW_DATA);
- tdInitDataRow(memRowDataBody(pTable->lastRow), pSchema);
+ memRowSetType(lastRow, SMEM_ROW_DATA);
+ tdInitDataRow(memRowDataBody(lastRow), pSchema);
for (int icol = 0; icol < schemaNCols(pSchema); icol++) {
STColumn *pCol = schemaColAt(pSchema, icol);
SDataCol *pDataCol = pReadh->pDCols[0]->cols + icol;
- tdAppendColVal(memRowDataBody(pTable->lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type,
+ tdAppendColVal(memRowDataBody(lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type,
pCol->offset);
}
+ TSKEY lastKey = memRowKey(lastRow);
+
+ // during the load data in file, new data would be inserted and last row has been updated
+ TSDB_WLOCK_TABLE(pTable);
+ if (pTable->lastRow == NULL) {
+ pTable->lastKey = lastKey;
+ pTable->lastRow = lastRow;
+ TSDB_WUNLOCK_TABLE(pTable);
+ } else {
+ TSDB_WUNLOCK_TABLE(pTable);
+ taosTZfree(lastRow);
+ }
+
return 0;
}
@@ -896,14 +901,105 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) {
tsdbDestroyReadH(&readh);
- if (CACHE_LAST_NULL_COLUMN(pCfg)) {
- atomic_store_8(&pRepo->hasCachedLastColumn, 1);
+ // if (CACHE_LAST_NULL_COLUMN(pCfg)) {
+ // atomic_store_8(&pRepo->hasCachedLastColumn, 1);
+ // }
+
+ return 0;
+}
+
+int32_t tsdbLoadLastCache(STsdbRepo *pRepo, STable *pTable) {
+ SFSIter fsiter;
+ SReadH readh;
+ SDFileSet *pSet;
+ int cacheLastRowTableNum = 0;
+ int cacheLastColTableNum = 0;
+
+ bool cacheLastRow = CACHE_LAST_ROW(&(pRepo->config));
+ bool cacheLastCol = CACHE_LAST_NULL_COLUMN(&(pRepo->config));
+
+ tsdbDebug("tsdbLoadLastCache for %s, cacheLastRow:%d, cacheLastCol:%d", pTable->name->data, cacheLastRow, cacheLastCol);
+
+ pTable->cacheLastConfigVersion = pRepo->cacheLastConfigVersion;
+
+ if (!cacheLastRow && pTable->lastRow != NULL) {
+ taosTZfree(pTable->lastRow);
+ pTable->lastRow = NULL;
+ }
+ if (!cacheLastCol && pTable->lastCols != NULL) {
+ tsdbFreeLastColumns(pTable);
+ }
+
+ if (!cacheLastRow && !cacheLastCol) {
+ return 0;
}
+ cacheLastRowTableNum = (cacheLastRow && pTable->lastRow == NULL) ? 1 : 0;
+ cacheLastColTableNum = (cacheLastCol && pTable->lastCols == NULL) ? 1 : 0;
+
+ if (cacheLastRowTableNum == 0 && cacheLastColTableNum == 0) {
+ return 0;
+ }
+
+ if (tsdbInitReadH(&readh, pRepo) < 0) {
+ return -1;
+ }
+
+ tsdbRLockFS(REPO_FS(pRepo));
+ tsdbFSIterInit(&fsiter, REPO_FS(pRepo), TSDB_FS_ITER_BACKWARD);
+
+ while ((cacheLastRowTableNum > 0 || cacheLastColTableNum > 0) && (pSet = tsdbFSIterNext(&fsiter)) != NULL) {
+ if (tsdbSetAndOpenReadFSet(&readh, pSet) < 0) {
+ tsdbUnLockFS(REPO_FS(pRepo));
+ tsdbDestroyReadH(&readh);
+ return -1;
+ }
+
+ if (tsdbLoadBlockIdx(&readh) < 0) {
+ tsdbUnLockFS(REPO_FS(pRepo));
+ tsdbDestroyReadH(&readh);
+ return -1;
+ }
+
+ // tsdbDebug("tsdbRestoreInfo restore vgId:%d,table:%s", REPO_ID(pRepo), pTable->name->data);
+
+ if (tsdbSetReadTable(&readh, pTable) < 0) {
+ tsdbUnLockFS(REPO_FS(pRepo));
+ tsdbDestroyReadH(&readh);
+ return -1;
+ }
+
+ SBlockIdx *pIdx = readh.pBlkIdx;
+
+ if (pIdx && (cacheLastRowTableNum > 0) && (pTable->lastRow == NULL)) {
+ if (tsdbRestoreLastRow(pRepo, pTable, &readh, pIdx) != 0) {
+ tsdbUnLockFS(REPO_FS(pRepo));
+ tsdbDestroyReadH(&readh);
+ return -1;
+ }
+ cacheLastRowTableNum -= 1;
+ }
+
+ // restore NULL columns
+ if (pIdx && (cacheLastColTableNum > 0) && !pTable->hasRestoreLastColumn) {
+ if (tsdbRestoreLastColumns(pRepo, pTable, &readh) != 0) {
+ tsdbUnLockFS(REPO_FS(pRepo));
+ tsdbDestroyReadH(&readh);
+ return -1;
+ }
+ if (pTable->hasRestoreLastColumn) {
+ cacheLastColTableNum -= 1;
+ }
+ }
+ }
+
+ tsdbUnLockFS(REPO_FS(pRepo));
+ tsdbDestroyReadH(&readh);
+
return 0;
}
-int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
+UNUSED_FUNC int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
bool cacheLastRow = false, cacheLastCol = false;
SFSIter fsiter;
SReadH readh;
@@ -937,9 +1033,9 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
// if close last option,need to free data
if (need_free_last_row || need_free_last_col) {
- if (need_free_last_col) {
- atomic_store_8(&pRepo->hasCachedLastColumn, 0);
- }
+ // if (need_free_last_col) {
+ // atomic_store_8(&pRepo->hasCachedLastColumn, 0);
+ // }
tsdbInfo("free cache last data since cacheLast option changed");
for (int i = 1; i <= maxTableIdx; i++) {
STable *pTable = pMeta->tables[i];
@@ -1017,9 +1113,9 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
tsdbDestroyReadH(&readh);
- if (cacheLastCol) {
- atomic_store_8(&pRepo->hasCachedLastColumn, 1);
- }
+ // if (cacheLastCol) {
+ // atomic_store_8(&pRepo->hasCachedLastColumn, 1);
+ // }
return 0;
}
diff --git a/src/tsdb/src/tsdbMemTable.c b/src/tsdb/src/tsdbMemTable.c
index 584493175f72a86e6ca1957f28bdfbc649b7965c..28993f08c447c2a84a4493e40d74e924ad656c74 100644
--- a/src/tsdb/src/tsdbMemTable.c
+++ b/src/tsdb/src/tsdbMemTable.c
@@ -594,7 +594,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema *
}
}
- tdAppendMemRowToDataCol(row, *ppSchema, pCols, true);
+ tdAppendMemRowToDataCol(row, *ppSchema, pCols, true, 0);
}
return 0;
@@ -647,7 +647,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
TSKEY now = taosGetTimestamp(pRepo->config.precision);
TSKEY minKey = now - tsTickPerDay[pRepo->config.precision] * pRepo->config.keep;
TSKEY maxKey = now + tsTickPerDay[pRepo->config.precision] * pRepo->config.daysPerFile;
-
+
terrno = TSDB_CODE_SUCCESS;
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
@@ -1001,7 +1001,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
if ((value == NULL) || isNull(value, pTCol->type)) {
continue;
}
-
+ // lock
+ TSDB_WLOCK_TABLE(pTable);
SDataCol *pDataCol = &(pLatestCols[idx]);
if (pDataCol->pData == NULL) {
pDataCol->pData = malloc(pTCol->bytes);
@@ -1017,6 +1018,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
memcpy(pDataCol->pData, value, bytes);
//tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
pDataCol->ts = memRowKey(row);
+ // unlock
+ TSDB_WUNLOCK_TABLE(pTable);
}
}
@@ -1063,5 +1066,8 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r
updateTableLatestColumn(pRepo, pTable, row);
}
}
+
+ pTable->cacheLastConfigVersion = pRepo->cacheLastConfigVersion;
+
return 0;
}
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 4af49534c9d3b2be8fe73b44151bfd347944ddc8..4621712632a6089e5f52724f72517a7aae2697dc 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -639,27 +639,30 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) {
}
int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) {
- ASSERT(pTable->lastCols == NULL);
+ TSDB_WLOCK_TABLE(pTable);
+ if (pTable->lastCols == NULL) {
+ int16_t numOfColumn = pSchema->numOfCols;
- int16_t numOfColumn = pSchema->numOfCols;
+ pTable->lastCols = (SDataCol *)malloc(numOfColumn * sizeof(SDataCol));
+ if (pTable->lastCols == NULL) {
+ TSDB_WUNLOCK_TABLE(pTable);
+ return -1;
+ }
- pTable->lastCols = (SDataCol*)malloc(numOfColumn * sizeof(SDataCol));
- if (pTable->lastCols == NULL) {
- return -1;
- }
+ for (int16_t i = 0; i < numOfColumn; ++i) {
+ STColumn *pCol = schemaColAt(pSchema, i);
+ SDataCol *pDataCol = &(pTable->lastCols[i]);
+ pDataCol->bytes = 0;
+ pDataCol->pData = NULL;
+ pDataCol->colId = pCol->colId;
+ }
- for (int16_t i = 0; i < numOfColumn; ++i) {
- STColumn *pCol = schemaColAt(pSchema, i);
- SDataCol* pDataCol = &(pTable->lastCols[i]);
- pDataCol->bytes = 0;
- pDataCol->pData = NULL;
- pDataCol->colId = pCol->colId;
+ pTable->lastColSVersion = schemaVersion(pSchema);
+ pTable->maxColNum = numOfColumn;
+ pTable->restoreColumnNum = 0;
+ pTable->hasRestoreLastColumn = false;
}
-
- pTable->lastColSVersion = schemaVersion(pSchema);
- pTable->maxColNum = numOfColumn;
- pTable->restoreColumnNum = 0;
- pTable->hasRestoreLastColumn = false;
+ TSDB_WUNLOCK_TABLE(pTable);
return 0;
}
@@ -809,6 +812,7 @@ static STable *tsdbNewTable() {
pTable->lastCols = NULL;
pTable->restoreColumnNum = 0;
+ pTable->cacheLastConfigVersion = 0;
pTable->maxColNum = 0;
pTable->hasRestoreLastColumn = false;
pTable->lastColSVersion = -1;
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 5e4ab00b4158d2f1c15b3fe47e3a296ff429edfa..bf9206445926b8151861fc3366d8327a0077a87f 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -157,6 +157,7 @@ typedef struct STableGroupSupporter {
static STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList);
static int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList);
static int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle);
+static int32_t lazyLoadCacheLast(STsdbQueryHandle* pQueryHandle);
static int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey);
static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
@@ -591,6 +592,28 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
}
+static int32_t lazyLoadCacheLast(STsdbQueryHandle* pQueryHandle) {
+ STsdbRepo* pRepo = pQueryHandle->pTsdb;
+
+ size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
+ int32_t code = 0;
+ for (size_t i = 0; i < numOfTables; ++i) {
+ STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
+ STable* pTable = pCheckInfo->pTableObj;
+ if (pTable->cacheLastConfigVersion == pRepo->cacheLastConfigVersion) {
+ continue;
+ }
+ code = tsdbLoadLastCache(pRepo, pTable);
+ if (code != 0) {
+ tsdbError("%p uid:%" PRId64 ", tid:%d, failed to load last cache since %s", pQueryHandle, pTable->tableId.uid,
+ pTable->tableId.tid, tstrerror(terrno));
+ break;
+ }
+ }
+
+ return code;
+}
+
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
pCond->twindow = updateLastrowForEachGroup(groupList);
@@ -604,6 +627,8 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
return NULL;
}
+ lazyLoadCacheLast(pQueryHandle);
+
int32_t code = checkForCachedLastRow(pQueryHandle, groupList);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
@@ -618,13 +643,14 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
return pQueryHandle;
}
-
TsdbQueryHandleT tsdbQueryCacheLast(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
if (pQueryHandle == NULL) {
return NULL;
}
+ lazyLoadCacheLast(pQueryHandle);
+
int32_t code = checkForCachedLast(pQueryHandle);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
@@ -1518,7 +1544,7 @@ static void mergeTwoRowFromMem(STsdbQueryHandle* pQueryHandle, int32_t capacity,
int16_t offset;
bool isRow1DataRow = isDataRow(row1);
- bool isRow2DataRow;
+ bool isRow2DataRow = false;
bool isChosenRowDataRow;
int32_t chosen_itr;
void *value;
@@ -2758,6 +2784,9 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
}
int32_t i = 0, j = 0;
+
+ // lock pTable->lastCols[i] as it would be released when schema update(tsdbUpdateLastColSchema)
+ TSDB_RLOCK_TABLE(pTable);
while(i < tgNumOfCols && j < numOfCols) {
pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (pTable->lastCols[j].colId < pColInfo->info.colId) {
@@ -2844,6 +2873,7 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
i++;
j++;
}
+ TSDB_RUNLOCK_TABLE(pTable);
// leave the real ts column as the last row, because last function only (not stable) use the last row as res
if (priKey != TSKEY_INITIAL_VAL) {
@@ -3175,7 +3205,9 @@ int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle) {
int32_t code = 0;
- if (pQueryHandle->pTsdb && atomic_load_8(&pQueryHandle->pTsdb->hasCachedLastColumn)){
+ STsdbRepo* pRepo = pQueryHandle->pTsdb;
+
+ if (pRepo && CACHE_LAST_NULL_COLUMN(&(pRepo->config))) {
pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LAST;
}
@@ -3420,9 +3452,12 @@ void filterPrepare(void* expr, void* param) {
int dummy = -1;
SHashObj *pObj = NULL;
if (pInfo->sch.colId == TSDB_TBNAME_COLUMN_INDEX) {
- pObj = taosHashInit(256, taosGetDefaultHashFunction(pInfo->sch.type), true, false);
SArray *arr = (SArray *)(pCond->arr);
- for (size_t i = 0; i < taosArrayGetSize(arr); i++) {
+
+ size_t size = taosArrayGetSize(arr);
+ pObj = taosHashInit(size * 2, taosGetDefaultHashFunction(pInfo->sch.type), true, false);
+
+ for (size_t i = 0; i < size; i++) {
char* p = taosArrayGetP(arr, i);
strntolower_s(varDataVal(p), varDataVal(p), varDataLen(p));
taosHashPut(pObj, varDataVal(p), varDataLen(p), &dummy, sizeof(dummy));
@@ -3430,12 +3465,14 @@ void filterPrepare(void* expr, void* param) {
} else {
buildFilterSetFromBinary((void **)&pObj, pCond->pz, pCond->nLen);
}
+
pInfo->q = (char *)pObj;
} else if (pCond != NULL) {
uint32_t size = pCond->nLen * TSDB_NCHAR_SIZE;
if (size < (uint32_t)pSchema->bytes) {
size = pSchema->bytes;
}
+
// to make sure tonchar does not cause invalid write, since the '\0' needs at least sizeof(wchar_t) space.
pInfo->q = calloc(1, size + TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
tVariantDump(pCond, pInfo->q, pSchema->type, true);
@@ -3583,7 +3620,7 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC
return pTableGroup;
}
-int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len,
+int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, const char* pTagCond, size_t len,
STableGroupInfo* pGroupInfo, SColIndex* pColIndex, int32_t numOfCols) {
if (tsdbRLockRepoMeta(tsdb) < 0) goto _error;
@@ -3645,19 +3682,19 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo* tsdb, uint64_t uid, TSKEY skey, cons
} END_TRY
void *filterInfo = NULL;
-
+
ret = filterInitFromTree(expr, &filterInfo, 0);
if (ret != TSDB_CODE_SUCCESS) {
terrno = ret;
goto _error;
}
-
+
tsdbQueryTableList(pTable, res, filterInfo);
filterFreeInfo(filterInfo);
tExprTreeDestroy(expr, NULL);
-
+
pGroupInfo->numOfTables = (uint32_t)taosArrayGetSize(res);
pGroupInfo->pGroupList = createTableGroup(res, pTagSchema, pColIndex, numOfCols, skey);
@@ -3844,7 +3881,7 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
static FORCE_INLINE int32_t tsdbGetTagDataFromId(void *param, int32_t id, void **data) {
STable* pTable = (STable*)(SL_GET_NODE_DATA((SSkipListNode *)param));
-
+
if (id == TSDB_TBNAME_COLUMN_INDEX) {
*data = TABLE_NAME(pTable);
} else {
@@ -3877,7 +3914,7 @@ static void queryIndexedColumn(SSkipList* pSkipList, void* filterInfo, SArray* r
iter = tSkipListCreateIterFromVal(pSkipList, startVal, pSkipList->type, TSDB_ORDER_DESC);
FILTER_CLR_FLAG(order, TSDB_ORDER_DESC);
}
-
+
while (tSkipListIterNext(iter)) {
SSkipListNode *pNode = tSkipListIterGet(iter);
@@ -3886,7 +3923,7 @@ static void queryIndexedColumn(SSkipList* pSkipList, void* filterInfo, SArray* r
filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId);
all = filterExecute(filterInfo, 1, &addToResult, NULL, 0);
}
-
+
char *pData = SL_GET_NODE_DATA(pNode);
tsdbDebug("filter index column, table:%s, result:%d", ((STable *)pData)->name->data, all);
@@ -3918,7 +3955,7 @@ static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray*
SSkipListNode *pNode = tSkipListIterGet(iter);
filterSetColFieldData(filterInfo, pNode, tsdbGetTagDataFromId);
-
+
char *pData = SL_GET_NODE_DATA(pNode);
bool all = filterExecute(filterInfo, 1, &addToResult, NULL, 0);
@@ -3926,7 +3963,7 @@ static void queryIndexlessColumn(SSkipList* pSkipList, void* filterInfo, SArray*
if (all || (addToResult && *addToResult)) {
STableKeyInfo info = {.pTable = (void*)pData, .lastKey = TSKEY_INITIAL_VAL};
taosArrayPush(res, &info);
- }
+ }
}
tfree(addToResult);
@@ -3939,9 +3976,9 @@ static int32_t tsdbQueryTableList(STable* pTable, SArray* pRes, void* filterInfo
STSchema* pTSSchema = pTable->tagSchema;
bool indexQuery = false;
SSkipList *pSkipList = pTable->pIndex;
-
+
filterIsIndexedColumnQuery(filterInfo, pTSSchema->columns->colId, &indexQuery);
-
+
if (indexQuery) {
queryIndexedColumn(pSkipList, filterInfo, pRes);
} else {
diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h
index 3ba0031a07a83d7374b3f940cf51bf31478cfd38..c52fbf208f6fbf0384ecf66650919c4d12ae352e 100644
--- a/src/util/inc/tconfig.h
+++ b/src/util/inc/tconfig.h
@@ -20,7 +20,7 @@
extern "C" {
#endif
-#define TSDB_CFG_MAX_NUM 125
+#define TSDB_CFG_MAX_NUM 130
#define TSDB_CFG_PRINT_LEN 23
#define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN 41
diff --git a/src/util/src/talgo.c b/src/util/src/talgo.c
index 54b7e00eb7dd6f31ac8c8e6afa89790846abac5b..352cd3c05e4d588900b676b605964e068c4ed191 100644
--- a/src/util/src/talgo.c
+++ b/src/util/src/talgo.c
@@ -230,7 +230,7 @@ void taosheapadjust(void *base, int32_t size, int32_t start, int32_t end, const
{
int32_t parent;
int32_t child;
- char *buf;
+ char *buf = NULL;
if (base && size > 0 && compar) {
parent = start;
diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c
index 179fbd05a5a8f5ddfb28b68130f87e26ed4e522f..4bed561f71aef6aafcf4ec2af814c0a2c7e6b63f 100644
--- a/src/util/src/tcompare.c
+++ b/src/util/src/tcompare.c
@@ -417,14 +417,20 @@ int32_t compareFindItemInSet(const void *pLeft, const void* pRight) {
int32_t compareWStrPatternComp(const void* pLeft, const void* pRight) {
SPatternCompareInfo pInfo = {'%', '_'};
+ size_t size = varDataLen(pLeft)/TSDB_NCHAR_SIZE;
assert(varDataLen(pRight) <= TSDB_MAX_FIELD_LEN * TSDB_NCHAR_SIZE);
wchar_t *pattern = calloc(varDataLen(pRight) + 1, sizeof(wchar_t));
+ wchar_t *str = calloc(size + 1, sizeof(wchar_t));
+
memcpy(pattern, varDataVal(pRight), varDataLen(pRight));
+ memcpy(str, varDataVal(pLeft), size * sizeof(wchar_t));
+
+ int32_t ret = WCSPatternMatch(pattern, str, size, &pInfo);
- int32_t ret = WCSPatternMatch(pattern, varDataVal(pLeft), varDataLen(pLeft)/TSDB_NCHAR_SIZE, &pInfo);
free(pattern);
+ free(str);
return (ret == TSDB_PATTERN_MATCH) ? 0 : 1;
}
diff --git a/src/util/src/tcompression.c b/src/util/src/tcompression.c
index 48bba75926415752cfd777242a55ef71c5c96c2c..66150c46fb22a7225444b4e13b2d35fc636ea15b 100644
--- a/src/util/src/tcompression.c
+++ b/src/util/src/tcompression.c
@@ -346,7 +346,7 @@ int tsCompressBoolImp(const char *const input, const int nelements, char *const
/* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */
output[pos] |= t;
} else {
- uError("Invalid compress bool value:%d", output[pos]);
+ uError("Invalid compress bool value:%d", input[i]);
return -1;
}
}
diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c
index 0d335ca2664ffee75a79144b97181a5b625df66d..a2eea5aa7d99a43f2cf7f0552e843ce9a52034c0 100644
--- a/src/util/src/tlog.c
+++ b/src/util/src/tlog.c
@@ -566,7 +566,7 @@ static int32_t taosPushLogBuffer(SLogBuff *tLogBuff, char *msg, int32_t msgLen)
int32_t end = 0;
int32_t remainSize = 0;
static int64_t lostLine = 0;
- char tmpBuf[40] = {0};
+ char tmpBuf[60] = {0};
int32_t tmpBufLen = 0;
if (tLogBuff == NULL || tLogBuff->stop) return -1;
diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c
index 6a37f11ecef376e70f4eefbf6446150bd350cf07..1ffa94b0df6b63dac914649c7003d37bbedbdb24 100644
--- a/src/util/src/tqueue.c
+++ b/src/util/src/tqueue.c
@@ -258,9 +258,9 @@ void taosCloseQset(taos_qset param) {
pthread_mutex_unlock(&qset->mutex);
pthread_mutex_destroy(&qset->mutex);
+ uTrace("qset:%p is closed", qset);
tsem_destroy(&qset->sem);
free(qset);
- uTrace("qset:%p is closed", qset);
}
// tsem_post 'qset->sem', so that reader threads waiting for it
diff --git a/src/util/src/tskiplist.c b/src/util/src/tskiplist.c
index 98fd9c094cba3e779c9f203fdacc548a3bda5ef4..2bdaeb07a16a638d75c1c5a8b20763e5c660a05f 100644
--- a/src/util/src/tskiplist.c
+++ b/src/util/src/tskiplist.c
@@ -144,7 +144,6 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
// backward to put the first data
hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
-
tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);
for (int level = 0; level < pSkipList->maxLevel; level++) {
@@ -163,7 +162,12 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
for (int i = 0; i < pSkipList->maxLevel; i++) {
forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i);
}
+ } else if(compare == 0) {
+ // same need special deal
+ forward[0] = SL_NODE_GET_BACKWARD_POINTER(SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail,0),0);
+ hasDup = true;
} else {
+ SSkipListNode *p = NULL;
SSkipListNode *px = pSkipList->pHead;
for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
if (i < pSkipList->level) {
@@ -175,19 +179,29 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
}
}
- SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i);
+ // if px not head , must compare with px
+ if(px == pSkipList->pHead) {
+ p = SL_NODE_GET_FORWARD_POINTER(px, i);
+ } else {
+ p = px;
+ }
while (p != pSkipList->pTail) {
pKey = SL_GET_NODE_KEY(pSkipList, p);
compare = pSkipList->comparFn(pKey, pDataKey);
if (compare >= 0) {
- if (compare == 0 && !hasDup) hasDup = true;
+ if (compare == 0) {
+ hasDup = true;
+ forward[0] = SL_NODE_GET_BACKWARD_POINTER(p, 0);
+ }
break;
} else {
px = p;
p = SL_NODE_GET_FORWARD_POINTER(px, i);
}
}
+ // if found duplicate, immediately break, needn't continue to loop set rest forward[i] value
+ if(hasDup) break;
}
forward[i] = px;
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c
index 99ade6db1a76fdac48e441a2c1a9a2a3d388f812..0c7b65be80685e79312906010f476073f71b350f 100644
--- a/src/util/src/ttokenizer.c
+++ b/src/util/src/ttokenizer.c
@@ -592,7 +592,7 @@ SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken)
size_t nsize = strlen(newToken);
int32_t size = (int32_t)strlen(*str) - token->n + (int32_t)nsize + 1;
int32_t bsize = (int32_t)((uint64_t)token->z - (uint64_t)src);
- SStrToken ntoken;
+ SStrToken ntoken = {0};
*str = calloc(1, size);
@@ -628,7 +628,7 @@ SStrToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr) {
t0.n = 0;
return t0;
}
-
+
t = str[++(*i)];
}
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index 35ec90cb2efbb270c8b007f9bdb347333a87fded..31556b83d06224d285db29650eba82c4d3acab5e 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -53,13 +53,13 @@ int32_t strdequote(char *z) {
}
-int32_t strRmquote(char *z, int32_t len){
+int32_t strRmquote(char *z, int32_t len){
// delete escape character: \\, \', \"
char delim = z[0];
if (delim != '\'' && delim != '\"') {
return len;
}
-
+
int32_t cnt = 0;
int32_t j = 0;
for (uint32_t k = 1; k < len - 1; ++k) {
@@ -74,23 +74,24 @@ int32_t strRmquote(char *z, int32_t len){
continue;
}
}
-
+
z[j] = z[k];
j++;
}
-
+
z[j] = 0;
-
+
return len - 2 - cnt;
}
int32_t strRmquoteEscape(char *z, int32_t len) {
if (len <= 0) return len;
-
+
if (z[0] == '\'' || z[0] == '\"') {
return strRmquote(z, len);
} else if (len > 1 && z[0] == TS_ESCAPE_CHAR && z[len - 1] == TS_ESCAPE_CHAR) {
memmove(z, z + 1, len - 2);
+ z[len - 2] = '\0';
return len - 2;
}
diff --git a/src/util/src/tworker.c b/src/util/src/tworker.c
index 8b4053bccd1ce8d9d3f58328d838f4ba5132a100..55604b417ee7d32dd174df01ef4f170923ddb327 100644
--- a/src/util/src/tworker.c
+++ b/src/util/src/tworker.c
@@ -91,6 +91,6 @@ void *tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle) {
}
void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue) {
- taosCloseQueue(pQueue);
uDebug("worker:%s, queue:%p is freed", pPool->name, pQueue);
+ taosCloseQueue(pQueue);
}
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index 64f87ba5caddbb8b9cf90c2a13fa4029a9821ab0..e8495cac6d7de10018757876fba3674bda0e6231 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -20,6 +20,7 @@
#include "tglobal.h"
#include "query.h"
#include "vnodeStatus.h"
+#include "tgrant.h"
int32_t vNumOfExistedQHandle; // current initialized and existed query handle in current dnode
@@ -55,6 +56,11 @@ int32_t vnodeProcessRead(void *vparam, SVReadMsg *pRead) {
}
static int32_t vnodeCheckRead(SVnodeObj *pVnode) {
+ if (grantCheck(TSDB_GRANT_TIME) != TSDB_CODE_SUCCESS) {
+ vDebug("vgId:%d, grant expired, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode);
+ return TSDB_CODE_GRANT_EXPIRED;
+ }
+
if (!vnodeInReadyStatus(pVnode)) {
vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status],
pVnode->refCount, pVnode);
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index 7f7d37a255ff42ab47be94507a33647fce853e8e..35c2ab72dfdf55f66b1095c757fc4f90656c842b 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -169,8 +169,8 @@ static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pR
}
static int32_t vnodeCheckWal(SVnodeObj *pVnode) {
- if (tsdbIsNeedCommit(pVnode->tsdb)) {
- return tsdbCheckWal(pVnode->tsdb, walGetFSize(pVnode->wal) >> 20);
+ if (pVnode->isCommiting == 0) {
+ return tsdbCheckWal(pVnode->tsdb, (uint32_t)(walGetFSize(pVnode->wal) >> 20));
}
return 0;
}
@@ -189,7 +189,7 @@ static int32_t vnodeProcessCreateTableMsg(SVnodeObj *pVnode, void *pCont, SRspRe
ASSERT(code != 0);
}
- if (((++pVnode->tblMsgVer) & 16383) == 0) { // lazy check
+ if (((++pVnode->tblMsgVer) & 32767) == 0) { // lazy check
vnodeCheckWal(pVnode);
}
diff --git a/tests/examples/JDBC/connectionPools/pom.xml b/tests/examples/JDBC/connectionPools/pom.xml
index 34518900ed30f48effd47a8786233080f3e5291f..81c549274c81ddc69d52508c46cd215edd8c5467 100644
--- a/tests/examples/JDBC/connectionPools/pom.xml
+++ b/tests/examples/JDBC/connectionPools/pom.xml
@@ -18,7 +18,7 @@
com.taosdata.jdbc
taos-jdbcdriver
- 2.0.18
+ 2.0.34
diff --git a/tests/examples/JDBC/readme.md b/tests/examples/JDBC/readme.md
index 9a017f4feab148cb7c3fd4132360c3075c6573cb..35dfb341d7d62bb283897523f928e04dabea962d 100644
--- a/tests/examples/JDBC/readme.md
+++ b/tests/examples/JDBC/readme.md
@@ -10,4 +10,4 @@
| 6 | taosdemo | This is an internal tool for testing Our JDBC-JNI, JDBC-RESTful, RESTful interfaces |
-more detail: https://www.taosdata.com/cn//documentation20/connector-java/
\ No newline at end of file
+more detail: https://www.taosdata.com/cn/documentation20/connector/java
diff --git a/tests/examples/c/apitest.c b/tests/examples/c/apitest.c
index 2510035e9217f4907ac8fdd3d11d7fc123a2bfa6..2c197887e7d7f1e6397213641a02ee8b37a84190 100644
--- a/tests/examples/c/apitest.c
+++ b/tests/examples/c/apitest.c
@@ -15,7 +15,7 @@ static void prepare_data(TAOS* taos) {
result = taos_query(taos, "drop database if exists test;");
taos_free_result(result);
usleep(100000);
- result = taos_query(taos, "create database test precision 'us';");
+ result = taos_query(taos, "create database test precision 'ns';");
taos_free_result(result);
usleep(100000);
taos_select_db(taos, "test");
@@ -293,7 +293,7 @@ void verify_schema_less(TAOS* taos) {
result = taos_query(taos, "drop database if exists test;");
taos_free_result(result);
usleep(100000);
- result = taos_query(taos, "create database test precision 'us' update 1;");
+ result = taos_query(taos, "create database test precision 'ns' update 1 keep 36500;");
taos_free_result(result);
usleep(100000);
@@ -401,6 +401,21 @@ void verify_schema_less(TAOS* taos) {
}
taos_free_result(result);
+ //Test timestamp precision
+ char* lines7[] = {
+ "stts,t1=10i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1",
+ };
+
+ for (int precision = TSDB_SML_TIMESTAMP_HOURS; precision <= TSDB_SML_TIMESTAMP_NANO_SECONDS; ++precision) {
+ result = taos_schemaless_insert(taos, lines7, 1, TSDB_SML_LINE_PROTOCOL, precision);
+ code = taos_errno(result);
+ if (code != TSDB_CODE_SUCCESS) {
+ affected_rows = taos_affected_rows(result);
+ printf("\033[31m [lines7_%d]taos_schemaless_insert failed, code: %d,%s, affected rows:%d \033[0m\n", precision, code, taos_errstr(result), affected_rows);
+ }
+ taos_free_result(result);
+ }
+
}
int main(int argc, char* argv[]) {
diff --git a/tests/examples/c/epoll.c b/tests/examples/c/epoll.c
index 0fb8754de666d7067ef3dcbf9b7797592ca5b61b..05df33ffe6f0c08dd5608bb3ba30a21623f2ae45 100644
--- a/tests/examples/c/epoll.c
+++ b/tests/examples/c/epoll.c
@@ -92,7 +92,7 @@ static void null_event(ep_t *ep, struct epoll_event *ev, fde_t *client);
fprintf(stderr, "" fmt "\n", ##__VA_ARGS__); \
} \
fprintf(stderr, "usage:\n"); \
- fprintf(stderr, " %s -l : specify listenning port\n", arg0); \
+ fprintf(stderr, " %s -l : specify listening port\n", arg0); \
} while (0)
int main(int argc, char *argv[]) {
@@ -256,7 +256,7 @@ static int open_listen(unsigned short port) {
E("getsockname() failed");
}
A(len == sizeof(si), "internal logic error");
- D("listenning at: %d", ntohs(si.sin_port));
+ D("listening at: %d", ntohs(si.sin_port));
return skt;
} while (0);
close(skt);
diff --git a/tests/examples/python/taosdemo/taosdemo.py b/tests/examples/python/taosdemo/taosdemo.py
index d55023bdbf119544a788aa6246c9d63dbf024872..4aaf00157c5fe5bbeec27b001f663a94c1d89439 100755
--- a/tests/examples/python/taosdemo/taosdemo.py
+++ b/tests/examples/python/taosdemo/taosdemo.py
@@ -21,78 +21,91 @@ import json
import random
import time
import datetime
+import multiprocessing
from multiprocessing import Manager, Pool, Lock
from multipledispatch import dispatch
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
@dispatch(str, str)
-def v_print(msg: str, arg: str):
+def v_print(msg, arg):
+ # type: (str, str) -> None
if verbose:
print(msg % arg)
@dispatch(str, str, str)
-def v_print(msg: str, arg1: str, arg2: str):
+def v_print(msg, arg1, arg2):
+ # type: (str, str, str) -> None
if verbose:
print(msg % (arg1, arg2))
@dispatch(str, str, str, str)
-def v_print(msg: str, arg1: str, arg2: str, arg3: str):
+def v_print(msg, arg1, arg2, arg3):
+ # type: (str, str, str, str) -> None
if verbose:
print(msg % (arg1, arg2, arg3))
@dispatch(str, str, str, str, str)
-def v_print(msg: str, arg1: str, arg2: str, arg3: str, arg4: str):
+def v_print(msg, arg1, arg2, arg3, arg4):
+ # type: (str, str, str, str, str) -> None
if verbose:
print(msg % (arg1, arg2, arg3, arg4))
@dispatch(str, int)
-def v_print(msg: str, arg: int):
+def v_print(msg, arg):
+ # type: (str, int) -> None
if verbose:
print(msg % int(arg))
@dispatch(str, int, str)
-def v_print(msg: str, arg1: int, arg2: str):
+def v_print(msg, arg1, arg2):
+ # type: (str, int, str) -> None
if verbose:
print(msg % (int(arg1), str(arg2)))
@dispatch(str, str, int)
-def v_print(msg: str, arg1: str, arg2: int):
+def v_print(msg, arg1, arg2):
+ # type: (str, str, int) -> None
if verbose:
print(msg % (arg1, int(arg2)))
@dispatch(str, int, int)
-def v_print(msg: str, arg1: int, arg2: int):
+def v_print(msg, arg1, arg2):
+ # type: (str, int, int) -> None
if verbose:
print(msg % (int(arg1), int(arg2)))
@dispatch(str, int, int, str)
-def v_print(msg: str, arg1: int, arg2: int, arg3: str):
+def v_print(msg, arg1, arg2, arg3):
+ # type: (str, int, int, str) -> None
if verbose:
print(msg % (int(arg1), int(arg2), str(arg3)))
@dispatch(str, int, int, int)
-def v_print(msg: str, arg1: int, arg2: int, arg3: int):
+def v_print(msg, arg1, arg2, arg3):
+ # type: (str, int, int, int) -> None
if verbose:
print(msg % (int(arg1), int(arg2), int(arg3)))
@dispatch(str, int, int, int, int)
-def v_print(msg: str, arg1: int, arg2: int, arg3: int, arg4: int):
+def v_print(msg, arg1, arg2, arg3, arg4):
+ # type: (str, int, int, int, int) -> None
if verbose:
print(msg % (int(arg1), int(arg2), int(arg3), int(arg4)))
-def restful_execute(host: str, port: int, user: str, password: str, cmd: str):
+def restful_execute(host, port, user, password, cmd):
+ # type: (str, int, str, str, str) -> None
url = "http://%s:%d/rest/sql" % (host, restPort)
v_print("restful_execute - cmd: %s", cmd)
@@ -112,7 +125,8 @@ def restful_execute(host: str, port: int, user: str, password: str, cmd: str):
print("resp: %s" % json.dumps(resp.json()))
-def query_func(process: int, thread: int, cmd: str):
+def query_func(process, thread, cmd):
+ # type: (int, int, str) -> None
v_print("%d process %d thread cmd: %s", process, thread, cmd)
if oneMoreHost != "NotSupported" and random.randint(
@@ -133,7 +147,8 @@ def query_func(process: int, thread: int, cmd: str):
host, port, user, password, cmd)
-def query_data_process(cmd: str):
+def query_data_process(cmd):
+ # type: (str) -> None
# establish connection if native
if native:
v_print("host:%s, user:%s passwd:%s configDir:%s ", host, user, password, configDir)
@@ -256,7 +271,8 @@ def drop_databases():
(dbName, i))
-def insert_func(process: int, thread: int):
+def insert_func(process, thread):
+ # type: (int, int) -> None
v_print("%d process %d thread, insert_func ", process, thread)
# generate uuid
@@ -374,7 +390,8 @@ def create_tb():
(tbName, j))
-def insert_data_process(lock, i: int, begin: int, end: int):
+def insert_data_process(lock, i, begin, end):
+ # type: (multiprocessing._LockType, int, int, int) -> None
lock.acquire()
tasks = end - begin
v_print("insert_data_process:%d table from %d to %d, tasks %d", i, begin, end, tasks)
@@ -675,7 +692,10 @@ if __name__ == "__main__":
printConfig()
if not skipPrompt:
- input("Press any key to continue..")
+ try:
+ input("Press any key to continue..")
+ except SyntaxError:
+ pass
# establish connection first if native
if native:
diff --git a/tests/pytest/client/twoClients.py b/tests/pytest/client/twoClients.py
index 358c4e851f7fa90caa8dd069e6b9b5064e44eb40..82191156bf425ca1f05e52516dbc04615255131c 100644
--- a/tests/pytest/client/twoClients.py
+++ b/tests/pytest/client/twoClients.py
@@ -36,7 +36,8 @@ class TwoClients:
tdDnodes.deploy(1)
tdDnodes.start(1)
- # first client create a stable and insert data
+ tdLog.sleep(2)
+ # first client create a stable and insert data
conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=tdDnodes.getSimCfgPath())
cursor1 = conn1.cursor()
cursor1.execute("drop database if exists db")
@@ -90,6 +91,8 @@ class TwoClients:
cursor2.close()
conn1.close()
conn2.close()
+
+ tdLog.success("%s successfully executed" % __file__)
clients = TwoClients()
clients.initConnection()
diff --git a/tests/pytest/compress/compressChangeVersion.py b/tests/pytest/compress/compressChangeVersion.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7b9ebe6b35dc4729e0dcae705ac7d93c73010e7
--- /dev/null
+++ b/tests/pytest/compress/compressChangeVersion.py
@@ -0,0 +1,109 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+sys.path.insert(0, os.getcwd())
+from util.log import *
+from util.sql import *
+from util.dnodes import *
+import taos
+import threading
+import subprocess
+from random import choice
+
+
+class TwoClients:
+ def initConnection(self):
+ self.host = "chenhaoran01"
+ self.user = "root"
+ self.password = "taosdata"
+ self.config = "/home/chr/cfg/single/"
+ self.port =6030
+ self.rowNum = 10
+ self.ts = 1537146000000
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def run(self):
+ buildPath = self.getBuildPath()
+ if (buildPath == ""):
+ tdLog.exit("taosd not found!")
+ else:
+ tdLog.info("taosd found in %s" % buildPath)
+ binPath = buildPath+ "/build/bin/"
+ walFilePath = "/var/lib/taos/mnode_bak/wal/"
+
+ # new taos client
+ conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config )
+ print(conn1)
+ cur1 = conn1.cursor()
+ tdSql.init(cur1, True)
+
+ # create backgroud db and tb
+ tdSql.execute("drop database if exists db1")
+ os.system("%staosdemo -f compress/insertDataDb1.json -y " % binPath)
+ # create foreground db and tb
+ tdSql.execute("drop database if exists foredb")
+ tdSql.execute("create database foredb")
+ tdSql.execute("use foredb")
+ print("123test")
+ tdSql.execute("create stable if not exists stb (ts timestamp, dataInt int, dataDouble double,dataStr nchar(200)) tags(loc nchar(50),t1 int)")
+ tdSql.execute("create table tb1 using stb tags('beijing1', 10)")
+ tdSql.execute("insert into tb1 values(1614218412000,8635,98.861,'qazwsxedcrfvtgbyhnujmikolp1')(1614218422000,8636,98.862,'qazwsxedcrfvtgbyhnujmikolp2')")
+ tdSql.execute("create table tb2 using stb tags('beijing2', 11)")
+ tdSql.execute("insert into tb2 values(1614218432000,8647,98.863,'qazwsxedcrfvtgbyhnujmikolp3')")
+ tdSql.execute("insert into tb2 values(1614218442000,8648,98.864,'qazwsxedcrfvtgbyhnujmikolp4')")
+
+
+ # check data correct
+ tdSql.execute("use db1")
+ tdSql.query("select count(tbname) from stb0")
+ tdSql.checkData(0, 0, 50000)
+ tdSql.query("select count(*) from stb0")
+ tdSql.checkData(0, 0, 5000000)
+ tdSql.execute("use foredb")
+ tdSql.query("select count (tbname) from stb")
+ tdSql.checkData(0, 0, 2)
+ tdSql.query("select count (*) from stb")
+ tdSql.checkData(0, 0, 4)
+ tdSql.query("select * from tb1 order by ts")
+ tdSql.checkData(0, 3, "qazwsxedcrfvtgbyhnujmikolp1")
+ tdSql.query("select * from tb2 order by ts")
+ tdSql.checkData(1, 3, "qazwsxedcrfvtgbyhnujmikolp4")
+
+
+
+ # delete useless file
+ testcaseFilename = os.path.split(__file__)[-1]
+ os.system("rm -rf ./insert_res.txt")
+ # os.system("rm -rf compress/%s.sql" % testcaseFilename )
+
+clients = TwoClients()
+clients.initConnection()
+# clients.getBuildPath()
+clients.run()
\ No newline at end of file
diff --git a/tests/pytest/compress/insertDataDb1.json b/tests/pytest/compress/insertDataDb1.json
new file mode 100644
index 0000000000000000000000000000000000000000..65cec71a65ff4ef3814bee4949def151c32945ee
--- /dev/null
+++ b/tests/pytest/compress/insertDataDb1.json
@@ -0,0 +1,62 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file": "./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "db1",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 3650,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 50000,
+ "childtable_prefix": "stb00_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 100,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_limit": 0,
+ "childtable_offset":0,
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "",
+ "columns": [{"type": "INT"},{"type": "TINYINT"},{"type": "smallint"},{"type": "bool"},{"type": "bigint"},{"type": "float"},{"type": "double"}, {"type": "BINARY","len": 32}, {"type": "nchar","len": 32}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
+ }]
+ }]
+}
+
diff --git a/tests/pytest/dbmgmt/nanoSecondCheck.py b/tests/pytest/dbmgmt/nanoSecondCheck.py
index a5e9adacee53a9172a2d8990ccc4d83feb983bdd..15f22e3dbef937df9e2115510c938d077e97fbb3 100644
--- a/tests/pytest/dbmgmt/nanoSecondCheck.py
+++ b/tests/pytest/dbmgmt/nanoSecondCheck.py
@@ -31,22 +31,23 @@ class TDTestCase:
tdSql.prepare()
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
- tdSql.execute('create database db precision "ns";')
- tdSql.query('show databases;')
+ tdSql.error('create database db keep 365000')
+ tdSql.execute('create database db precision "ns"')
+ tdSql.query('show databases')
tdSql.checkData(0,16,'ns')
tdSql.execute('use db')
tdLog.debug('testing nanosecond support in 1st timestamp')
tdSql.execute('create table tb (ts timestamp, speed int)')
- tdSql.execute('insert into tb values(\'2021-06-10 0:00:00.100000001\', 1);')
- tdSql.execute('insert into tb values(1623254400150000000, 2);')
- tdSql.execute('import into tb values(1623254400300000000, 3);')
- tdSql.execute('import into tb values(1623254400299999999, 4);')
- tdSql.execute('insert into tb values(1623254400300000001, 5);')
- tdSql.execute('insert into tb values(1623254400999999999, 7);')
+ tdSql.execute('insert into tb values(\'2021-06-10 0:00:00.100000001\', 1)')
+ tdSql.execute('insert into tb values(1623254400150000000, 2)')
+ tdSql.execute('import into tb values(1623254400300000000, 3)')
+ tdSql.execute('import into tb values(1623254400299999999, 4)')
+ tdSql.execute('insert into tb values(1623254400300000001, 5)')
+ tdSql.execute('insert into tb values(1623254400999999999, 7)')
- tdSql.query('select * from tb;')
+ tdSql.query('select * from tb')
tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001')
tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000')
tdSql.checkData(2,0,'2021-06-10 0:00:00.299999999')
@@ -54,61 +55,61 @@ class TDTestCase:
tdSql.checkData(4,1,5)
tdSql.checkData(5,1,7)
tdSql.checkRows(6)
- tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002;')
+ tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400100000002')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\';')
+ tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000001\' and ts < \'2021-06-10 0:00:00.160000000\'')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000;')
+ tdSql.query('select count(*) from tb where ts > 1623254400100000000 and ts < 1623254400150000000')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\';')
+ tdSql.query('select count(*) from tb where ts > \'2021-06-10 0:00:00.100000000\' and ts < \'2021-06-10 0:00:00.150000000\'')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts > 1623254400400000000;')
+ tdSql.query('select count(*) from tb where ts > 1623254400400000000')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\';')
+ tdSql.query('select count(*) from tb where ts < \'2021-06-10 00:00:00.400000000\'')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb where ts > now + 400000000b;')
+ tdSql.query('select count(*) from tb where ts > now + 400000000b')
tdSql.checkRows(0)
- tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\';')
+ tdSql.query('select count(*) from tb where ts >= \'2021-06-10 0:00:00.100000001\'')
tdSql.checkData(0,0,6)
- tdSql.query('select count(*) from tb where ts <= 1623254400300000000;')
+ tdSql.query('select count(*) from tb where ts <= 1623254400300000000')
tdSql.checkData(0,0,4)
- tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\';')
+ tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.000000000\'')
tdSql.checkRows(0)
- tdSql.query('select count(*) from tb where ts = 1623254400150000000;')
+ tdSql.query('select count(*) from tb where ts = 1623254400150000000')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\';')
+ tdSql.query('select count(*) from tb where ts = \'2021-06-10 0:00:00.100000001\'')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000;')
+ tdSql.query('select count(*) from tb where ts between 1623254400000000000 and 1623254400400000000')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\';')
+ tdSql.query('select count(*) from tb where ts between \'2021-06-10 0:00:00.299999999\' and \'2021-06-10 0:00:00.300000001\'')
tdSql.checkData(0,0,3)
- tdSql.query('select avg(speed) from tb interval(5000000000b);')
+ tdSql.query('select avg(speed) from tb interval(5000000000b)')
tdSql.checkRows(1)
tdSql.query('select avg(speed) from tb interval(100000000b)')
tdSql.checkRows(4)
- tdSql.error('select avg(speed) from tb interval(1b);')
- tdSql.error('select avg(speed) from tb interval(999b);')
+ tdSql.error('select avg(speed) from tb interval(1b)')
+ tdSql.error('select avg(speed) from tb interval(999b)')
- tdSql.query('select avg(speed) from tb interval(1000b);')
+ tdSql.query('select avg(speed) from tb interval(1000b)')
tdSql.checkRows(5)
- tdSql.query('select avg(speed) from tb interval(1u);')
+ tdSql.query('select avg(speed) from tb interval(1u)')
tdSql.checkRows(5)
- tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b);')
+ tdSql.query('select avg(speed) from tb interval(100000000b) sliding (100000000b)')
tdSql.checkRows(4)
tdSql.query('select last(*) from tb')
@@ -119,20 +120,20 @@ class TDTestCase:
tdSql.checkData(0,0, 1623254400100000001)
tdSql.checkData(0,0, '2021-06-10 0:00:00.100000001')
- tdSql.execute('insert into tb values(now + 500000000b, 6);')
- tdSql.query('select * from tb;')
+ tdSql.execute('insert into tb values(now + 500000000b, 6)')
+ tdSql.query('select * from tb')
tdSql.checkRows(7)
tdLog.debug('testing nanosecond support in other timestamps')
- tdSql.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp);')
- tdSql.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\');')
- tdSql.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000);')
- tdSql.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000);')
- tdSql.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999);')
- tdSql.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001);')
- tdSql.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999);')
-
- tdSql.query('select * from tb2;')
+ tdSql.execute('create table tb2 (ts timestamp, speed int, ts2 timestamp)')
+ tdSql.execute('insert into tb2 values(\'2021-06-10 0:00:00.100000001\', 1, \'2021-06-11 0:00:00.100000001\')')
+ tdSql.execute('insert into tb2 values(1623254400150000000, 2, 1623340800150000000)')
+ tdSql.execute('import into tb2 values(1623254400300000000, 3, 1623340800300000000)')
+ tdSql.execute('import into tb2 values(1623254400299999999, 4, 1623340800299999999)')
+ tdSql.execute('insert into tb2 values(1623254400300000001, 5, 1623340800300000001)')
+ tdSql.execute('insert into tb2 values(1623254400999999999, 7, 1623513600999999999)')
+
+ tdSql.query('select * from tb2')
tdSql.checkData(0,0,'2021-06-10 0:00:00.100000001')
tdSql.checkData(1,0,'2021-06-10 0:00:00.150000000')
tdSql.checkData(2,1,4)
@@ -140,72 +141,72 @@ class TDTestCase:
tdSql.checkData(4,2,'2021-06-11 00:00:00.300000001')
tdSql.checkData(5,2,'2021-06-13 00:00:00.999999999')
tdSql.checkRows(6)
- tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000;')
+ tdSql.query('select count(*) from tb2 where ts2 > 1623340800000000000 and ts2 < 1623340800150000000')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\';')
+ tdSql.query('select count(*) from tb2 where ts2 > \'2021-06-11 0:00:00.100000000\' and ts2 < \'2021-06-11 0:00:00.100000002\'')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000;')
+ tdSql.query('select count(*) from tb2 where ts2 > 1623340800500000000')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\';')
+ tdSql.query('select count(*) from tb2 where ts2 < \'2021-06-11 0:00:00.400000000\'')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b;')
+ tdSql.query('select count(*) from tb2 where ts2 > now + 400000000b')
tdSql.checkRows(0)
- tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\';')
+ tdSql.query('select count(*) from tb2 where ts2 >= \'2021-06-11 0:00:00.100000001\'')
tdSql.checkData(0,0,6)
- tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000;')
+ tdSql.query('select count(*) from tb2 where ts2 <= 1623340800400000000')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\';')
+ tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.000000000\'')
tdSql.checkRows(0)
- tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\';')
+ tdSql.query('select count(*) from tb2 where ts2 = \'2021-06-11 0:00:00.300000001\'')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001;')
+ tdSql.query('select count(*) from tb2 where ts2 = 1623340800300000001')
tdSql.checkData(0,0,1)
- tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000;')
+ tdSql.query('select count(*) from tb2 where ts2 between 1623340800000000000 and 1623340800450000000')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\';')
+ tdSql.query('select count(*) from tb2 where ts2 between \'2021-06-11 0:00:00.299999999\' and \'2021-06-11 0:00:00.300000001\'')
tdSql.checkData(0,0,3)
- tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999;')
+ tdSql.query('select count(*) from tb2 where ts2 <> 1623513600999999999')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\';')
+ tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000001\'')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\';')
+ tdSql.query('select count(*) from tb2 where ts2 <> \'2021-06-11 0:00:00.100000000\'')
tdSql.checkData(0,0,6)
- tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999;')
+ tdSql.query('select count(*) from tb2 where ts2 != 1623513600999999999')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\';')
+ tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000001\'')
tdSql.checkData(0,0,5)
- tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\';')
+ tdSql.query('select count(*) from tb2 where ts2 != \'2021-06-11 0:00:00.100000000\'')
tdSql.checkData(0,0,6)
- tdSql.execute('insert into tb2 values(now + 500000000b, 6, now +2d);')
- tdSql.query('select * from tb2;')
+ tdSql.execute('insert into tb2 values(now + 500000000b, 6, now +2d)')
+ tdSql.query('select * from tb2')
tdSql.checkRows(7)
tdLog.debug('testing ill nanosecond format handling')
- tdSql.execute('create table tb3 (ts timestamp, speed int);')
+ tdSql.execute('create table tb3 (ts timestamp, speed int)')
- tdSql.error('insert into tb3 values(16232544001500000, 2);')
- tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2);')
- tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\';')
+ tdSql.error('insert into tb3 values(16232544001500000, 2)')
+ tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456\', 2)')
+ tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456000\'')
tdSql.checkRows(1)
- tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2);')
- tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456789\';')
+ tdSql.execute('insert into tb3 values(\'2021-06-10 0:00:00.123456789000\', 2)')
+ tdSql.query('select * from tb3 where ts = \'2021-06-10 0:00:00.123456789\'')
tdSql.checkRows(1)
os.system('sudo timedatectl set-ntp on')
@@ -216,4 +217,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index f54a6c4bbd6d7c10d94a59d6eae1f3aff00bf298..8af0c4642e7452cf835442b17e28d9d7c498bde0 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -27,7 +27,7 @@ python3 ./test.py -f insert/bug3654.py
python3 ./test.py -f insert/insertDynamicColBeforeVal.py
python3 ./test.py -f insert/in_function.py
python3 ./test.py -f insert/modify_column.py
-python3 ./test.py -f insert/line_insert.py
+#python3 ./test.py -f insert/line_insert.py
python3 ./test.py -f insert/specialSql.py
# timezone
@@ -273,6 +273,7 @@ python3 ./test.py -f query/queryStateWindow.py
# python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
python3 ./test.py -f query/nestquery_last_row.py
python3 ./test.py -f query/nestedQuery/nestedQuery.py
+python3 ./test.py -f query/nestedQuery/nestedQuery_datacheck.py
python3 ./test.py -f query/queryCnameDisplay.py
# python3 ./test.py -f query/operator_cost.py
# python3 ./test.py -f query/long_where_query.py
@@ -313,7 +314,7 @@ python3 testNoCompress.py
python3 testMinTablesPerVnode.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
-#python3 client/twoClients.py
+python3 client/twoClients.py
python3 test.py -f query/queryInterval.py
python3 test.py -f query/queryFillTest.py
# subscribe
@@ -399,13 +400,12 @@ python3 test.py -f alter/alter_cacheLastRow.py
python3 ./test.py -f query/querySession.py
python3 test.py -f alter/alter_create_exception.py
python3 ./test.py -f insert/flushwhiledrop.py
-#python3 ./test.py -f insert/schemalessInsert.py
python3 ./test.py -f alter/alterColMultiTimes.py
python3 ./test.py -f query/queryWildcardLength.py
python3 ./test.py -f query/queryTbnameUpperLower.py
python3 ./test.py -f query/query.py
-python3 ./test.py -f query/queryDiffColsOr.py
+python3 ./test.py -f query/queryDiffColsTagsAndOr.py
python3 ./test.py -f client/nettest.py
@@ -416,6 +416,10 @@ python3 ./test.py -f insert/verifyMemToDiskCrash.py
python3 ./test.py -f query/queryRegex.py
python3 ./test.py -f tools/taosdemoTestdatatype.py
+#python3 ./test.py -f insert/schemalessInsert.py
+#python3 ./test.py -f insert/openTsdbTelnetLinesInsert.py
+#python3 ./test.py -f insert/openTsdbJsonInsert.py
+
#======================p4-end===============
diff --git a/tests/pytest/functions/function_derivative.py b/tests/pytest/functions/function_derivative.py
index d4d8ab80a6b9587df900890ef18b8c4b1e3906bd..e90a7671197ae9abe7c4463308b480849769f2fe 100644
--- a/tests/pytest/functions/function_derivative.py
+++ b/tests/pytest/functions/function_derivative.py
@@ -147,6 +147,9 @@ class TDTestCase:
tdSql.error("select derivative(col, 10s, 1) from stb group by id")
tdSql.error("select derivative(col, 999ms, 1) from stb group by id")
tdSql.error("select derivative(col, 10s, 2) from stb group by id")
+        tdSql.error("select derivative(col, -106752999999999922222d, 0) from stb group by tbname") #overflow error
+ tdSql.error("select derivative(col, 10y, 0) from stb group by tbname") #TD-10399, DB error: syntax error near '10y, 0) from stb group by tbname;'
+ tdSql.error("select derivative(col, -106752d, 0) from stb group by tbname") #TD-10398 overflow tips
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/functions/queryTestCases-td3690.py b/tests/pytest/functions/queryTestCases-td3690.py
new file mode 100644
index 0000000000000000000000000000000000000000..12b8d9dc90f063bfff96fceb39641b5352c6ec11
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td3690.py
@@ -0,0 +1,1588 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+        cmd = f"sed -i '$a compressMsgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+ # 插入数据,然后进行show create 操作
+
+ # p1 不进入指定数据库
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2 进入指定数据库
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3 删库删表后进行查询
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.query("show databases")
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ days = tdSql.getData(0, 6)
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+                    tdLog.exit("compacting not occurred")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # 插入小范围内的随机数
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # 关闭服务并获取未开启压缩情况下的数据容量
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+        no_lossy = ""
+
+        double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+        _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+        lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+        lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+        lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+        lossy_no_cmd = f"sed -i '$a lossyColumns {no_lossy}' {cfgfile} "
+
+ ###################################################
+
+ # 开启有损压缩,参数float,并启动服务插入数据
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为float情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数double,并启动服务
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为double情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数 float&&double ,并启动服务
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
def td5433(self):
    """Regression test for TD-5433: `select distinct <tag>` with numeric and
    binary tag filters (equality / inequality, quoted and unquoted literals)
    must return the correct distinct-row counts on both stable schemas.
    Side effects: drops and recreates database `db` and its tables."""
    tdLog.printNoPrefix("==========TD-5433==========")
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")

    tdSql.execute("use db")
    # stb1 uses numeric tags, stb2 uses binary tags — same queries are run on both.
    tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
    tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
    numtab=20000
    # t0 cycles over 128 values (tinyint-safe); t1 = 100+i is unique per table.
    for i in range(numtab):
        sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
        tdSql.execute(sql)
        tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
        tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
        tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")

    # Five child tables of stb2 with distinct binary tag pairs.
    tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
    tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
    tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
    tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
    tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")

    # Numeric tag t1 on stb1: quoted and unquoted literals must behave the same.
    tdSql.query("select distinct t1 from stb1 where t1 != '150'")
    tdSql.checkRows(numtab-1)
    tdSql.query("select distinct t1 from stb1 where t1 != 150")
    tdSql.checkRows(numtab-1)
    tdSql.query("select distinct t1 from stb1 where t1 = 150")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb1 where t1 = '150'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb1")
    tdSql.checkRows(numtab)

    # Numeric tag t0 on stb1 (128 distinct values because of i % 128).
    tdSql.query("select distinct t0 from stb1 where t0 != '2'")
    tdSql.checkRows(127)
    tdSql.query("select distinct t0 from stb1 where t0 != 2")
    tdSql.checkRows(127)
    tdSql.query("select distinct t0 from stb1 where t0 = 2")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb1 where t0 = '2'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb1")
    tdSql.checkRows(128)

    # Binary tag t1 on stb2: numeric literals must compare against binary tags too.
    tdSql.query("select distinct t1 from stb2 where t1 != '200'")
    tdSql.checkRows(4)
    tdSql.query("select distinct t1 from stb2 where t1 != 200")
    tdSql.checkRows(4)
    tdSql.query("select distinct t1 from stb2 where t1 = 200")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb2 where t1 = '200'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t1 from stb2")
    tdSql.checkRows(5)

    # Binary tag t0 on stb2.
    tdSql.query("select distinct t0 from stb2 where t0 != '2'")
    tdSql.checkRows(4)
    tdSql.query("select distinct t0 from stb2 where t0 != 2")
    tdSql.checkRows(4)
    tdSql.query("select distinct t0 from stb2 where t0 = 2")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb2 where t0 = '2'")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0 from stb2")
    tdSql.checkRows(5)

    pass
+
def td5798(self):
    """Regression test for TD-5798 (distinct on multiple tag columns) and
    TD-5810 (distinct on multiple data columns): positive cases check exact
    distinct-row counts derived from the modular tag/data layout below;
    negative cases assert that unsupported combinations (distinct with
    group by / interval / fill / slimit / joins / nested distinct, etc.)
    raise errors. Side effects: drops and recreates database `db`."""
    tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")

    tdSql.execute("use db")
    tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
    tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
    maxRemainderNum=7
    tbnum=101
    # 100 child tables per stable; tags/data cycle mod 7, 2 or 3 so the expected
    # distinct counts below are small closed-form expressions. Each table also
    # gets one all-null data row (ts-only insert).
    for i in range(tbnum-1):
        sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
        tdSql.execute(sql)
        tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
        tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
        tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
        tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")

        tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
        tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
        tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
        tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
        tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
    # One extra child table per stable with all-null tags (adds the "+1" in
    # the distinct-tag expectations below).
    tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
    tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
    tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
    tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
    tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
    tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
    tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
    tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
    tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
    tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")

    # ========== TD-5810: support distinct on multiple data columns ==========
    tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
    tdSql.checkRows(tbnum)
    # c2 takes values 0..2 plus the null row -> 4 distinct values.
    tdSql.query(f"select distinct c2 from stb1")
    tdSql.checkRows(4)
    tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
    tdSql.checkRows(tbnum*3)
    tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
    tdSql.checkRows(tbnum)
    tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
    tdSql.checkRows(3)
    tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
    tdSql.checkRows(2)

    # Same distinct-data queries on a single child table (t1).
    tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
    tdSql.checkRows(1)
    tdSql.query(f"select distinct c2 from t1")
    tdSql.checkRows(4)
    tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
    tdSql.checkRows(3)
    tdSql.query(f"select distinct c1,c1 from t1 ")
    tdSql.checkRows(2)
    tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
    tdSql.checkRows(1)
    tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
    tdSql.checkRows(3)
    tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
    tdSql.checkRows(1)

    # Distinct on binary data column of stb2.
    tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
    tdSql.checkRows(3)
    tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
    tdSql.checkRows(2)

    # Unsupported forms must be rejected.
    tdSql.error("select distinct c5 from stb1")
    tdSql.error("select distinct c5 from t1")
    tdSql.error("select distinct c1 from db.*")
    tdSql.error("select c2, distinct c1 from stb1")
    tdSql.error("select c2, distinct c1 from t1")
    tdSql.error("select distinct c2 from ")
    tdSql.error("distinct c2 from stb1")
    tdSql.error("distinct c2 from t1")
    tdSql.error("select distinct c1, c2, c3 from stb1")
    tdSql.error("select distinct c1, c2, c3 from t1")
    tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
    tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
    tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
    # Column aliases are allowed with distinct.
    tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
    tdSql.checkRows(tbnum*3)
    tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
    tdSql.checkRows(3)
    tdSql.error("select distinct c1, c2 from stb1 order by ts")
    tdSql.error("select distinct c1, c2 from t1 order by ts")
    tdSql.error("select distinct c1, ts from stb1 group by c2")
    tdSql.error("select distinct c1, ts from t1 group by c2")
    tdSql.error("select distinct c1, max(c2) from stb1 ")
    tdSql.error("select distinct c1, max(c2) from t1 ")
    tdSql.error("select max(c2), distinct c1 from stb1 ")
    tdSql.error("select max(c2), distinct c1 from t1 ")
    tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
    tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
    tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
    tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
    tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
    tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
    tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
    tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
    tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
    tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
    tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
    tdSql.checkRows(6)
    tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
    tdSql.checkRows(15)
    tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
    tdSql.checkRows(3)

    # Distinct over nested (sub)queries.
    tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
    tdSql.checkRows(3)
    tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
    tdSql.checkRows(3)
    tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
    tdSql.checkRows(0)
    tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
    tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
    tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
    tdSql.checkRows(3)
    tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
    tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
    # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
    tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
    # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
    # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
    tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
    tdSql.checkRows(1)
    tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
    tdSql.checkRows(1)
    tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
    tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")

    # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
    # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")

    # ========== TD-5798: support distinct on multiple tag columns ==========
    # Expected counts follow from the tag layout: t0/t1 cycle mod 7 (in lockstep),
    # t2 mod 2, plus the one all-null-tag table.
    tdSql.query("select distinct t1 from stb1")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t0, t1 from stb1")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t1, t0 from stb1")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t1, t2 from stb1")
    tdSql.checkRows(maxRemainderNum*2+1)
    tdSql.query("select distinct t0, t1, t2 from stb1")
    tdSql.checkRows(maxRemainderNum*2+1)
    tdSql.query("select distinct t0 t1, t1 t2 from stb1")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t0, t0, t0 from stb1")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t0, t1 from t1")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0, t1 from t100num")
    tdSql.checkRows(1)

    tdSql.query("select distinct t3 from stb2")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t2, t3 from stb2")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t3, t2 from stb2")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t4, t2 from stb2")
    tdSql.checkRows(maxRemainderNum*3+1)
    tdSql.query("select distinct t2, t3, t4 from stb2")
    tdSql.checkRows(maxRemainderNum*3+1)
    tdSql.query("select distinct t2 t1, t3 t2 from stb2")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t3, t3, t3 from stb2")
    tdSql.checkRows(maxRemainderNum+1)
    tdSql.query("select distinct t2, t3 from t01")
    tdSql.checkRows(1)
    tdSql.query("select distinct t3, t4 from t0100num")
    tdSql.checkRows(1)

    ########## should be error #########
    tdSql.error("select distinct from stb1")
    tdSql.error("select distinct t3 from stb1")
    tdSql.error("select distinct t1 from db.*")
    tdSql.error("select distinct t2 from ")
    tdSql.error("distinct t2 from stb1")
    tdSql.error("select distinct stb1")
    tdSql.error("select distinct t0, t1, t2, t3 from stb1")
    tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")

    tdSql.error("select dist t0 from stb1")
    tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
    tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")

    tdSql.error("select distinct t0, t1 from t1 where t0 < 7")

    ########## add where condition ##########
    tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
    tdSql.checkRows(3)
    tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
    tdSql.checkRows(2)
    tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
    tdSql.checkRows(1)
    tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
    tdSql.checkRows(3)
    tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
    tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
    tdSql.checkRows(1)
    tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
    tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
    tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
    tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
    tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
    tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")

    tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
    tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
    tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
    tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
    tdSql.checkRows(5)
    tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
    tdSql.checkRows(4)
    tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
    tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
    tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
    tdSql.checkRows(1)
    tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
    tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
    tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
    tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
    tdSql.checkRows(1)
    tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
    tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
    tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
    tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
    tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")

    pass
+
def td5935(self):
    """Regression test covering two interval-query bugs:
    - TD-5933: an aggregate with interval() must not return 0 rows;
    - TD-5978: fill(next) must not produce 0 when the first row is null.
    Side effects: drops and recreates database `db` with 100 child tables
    of 1000 rows each (plus one ts-only null row per table)."""
    tdLog.printNoPrefix("==========TD-5935==========")
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")

    tdSql.execute("use db")
    tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
    # Current epoch in milliseconds; rows go back in 10 ms steps from here.
    nowtime=int(round((time.time()*1000)))
    for i in range(100):
        sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
        tdSql.execute(sql)
        for j in range(1000):
            tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
        # Oldest row carries only a timestamp, so c1/c2 are null there.
        tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")

    ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
    # NOTE(review): the variable name says stddev but the query uses last(*) —
    # confirm whether this still exercises the TD-5933 scenario as intended.
    stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
    tdSql.query(stddevAndIntervalSql)
    tdSql.checkRows(10)

    ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
    # The first bucket starts at the null (ts-only) row; fill(next) must take the
    # next real value rather than emitting 0 or null.
    fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
    tdSql.query(fillsql)
    fillResult=False
    if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
        fillResult=True
    if fillResult:
        tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
    else:
        tdLog.exit("fill(next) is wrong")

    pass
+
def td6068(self):
    """Regression test for TD-6068: `ts as <alias>` combined with
    derivative/diff/bottom/top must keep the aliased timestamp column aligned
    with the function's output timestamp (checked by comparing column 0 and
    column 1 row by row). The whole query battery is run twice — once on the
    freshly written data and once after a dnode restart — to verify results
    survive a reload from disk. Side effects: recreates database `db` and
    restarts dnode 1."""
    tdLog.printNoPrefix("==========TD-6068==========")
    tdSql.execute("drop database if exists db")
    tdSql.execute("create database if not exists db keep 3650")
    tdSql.execute("use db")

    tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")

    # Six data rows per table plus three ts-only (null) rows bracketing them.
    for i in range(100):
        sql = f"create table db.t{i} using db.stb1 tags({i})"
        tdSql.execute(sql)
        tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
        tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
        tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
        tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
        tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
        tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
        tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
        tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
        tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")

    # --- derivative: aliased ts must equal the function's output ts ---
    tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
    tdSql.checkRows(500)
    tdSql.checkCols(4)
    tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
    tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
    # derivative on a stable without group by, and on non-numeric columns, must error.
    tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
    tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
    tdSql.checkRows(5)
    tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
    tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
    tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
    tdSql.checkRows(5)
    tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
    tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")

    # --- diff ---
    tdSql.query("select ts as t, diff(c1) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
    tdSql.checkRows(500)
    tdSql.checkCols(4)
    tdSql.query("select ts as t, diff(c1) from t1")
    tdSql.query("select ts as t, diff(c1) from t1")
    tdSql.error("select ts as t, diff(c1) from stb1")
    tdSql.query("select ts as t, diff(c2) from t1")
    tdSql.checkRows(5)
    tdSql.error("select ts as t, diff(c3) from t1")
    tdSql.error("select ts as t, diff(c4) from t1")
    tdSql.query("select ts as t, diff(c5) from t1")
    tdSql.checkRows(5)
    tdSql.error("select ts as t, diff(c6) from t1")
    tdSql.error("select ts as t, diff(t1) from t1")
    tdSql.error("select ts as t, diff(c1, c2) from t1")

    # --- bottom ---
    tdSql.error("select ts as t, bottom(c1, 0) from t1")
    tdSql.query("select ts as t, bottom(c1, 5) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, bottom(c1, 5) from stb1")
    tdSql.checkRows(5)
    tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
    tdSql.checkRows(500)
    # Asking for 8 values yields only the 6 non-null rows.
    tdSql.query("select ts as t, bottom(c1, 8) from t1")
    tdSql.checkRows(6)
    tdSql.query("select ts as t, bottom(c2, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, bottom(c3, 5) from t1")
    tdSql.error("select ts as t, bottom(c4, 5) from t1")
    tdSql.query("select ts as t, bottom(c5, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, bottom(c6, 5) from t1")
    tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
    tdSql.error("select ts as t, bottom(t1, 1) from t1")
    tdSql.error("select ts as t, bottom(t1, 1) from stb1")
    tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
    tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")

    # --- top ---
    tdSql.error("select ts as t, top(c1, 0) from t1")
    tdSql.query("select ts as t, top(c1, 5) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, top(c1, 5) from stb1")
    tdSql.checkRows(5)
    tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
    tdSql.checkRows(500)
    tdSql.query("select ts as t, top(c1, 8) from t1")
    tdSql.checkRows(6)
    tdSql.query("select ts as t, top(c2, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, top(c3, 5) from t1")
    tdSql.error("select ts as t, top(c4, 5) from t1")
    tdSql.query("select ts as t, top(c5, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, top(c6, 5) from t1")
    tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
    tdSql.error("select ts as t, top(t1, 1) from t1")
    tdSql.error("select ts as t, top(t1, 1) from stb1")
    tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
    tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")

    # Restart the dnode and re-run the diff/bottom/top battery so the same
    # results are verified after data is reloaded from disk.
    tdDnodes.stop(1)
    tdDnodes.start(1)

    tdSql.query("select ts as t, diff(c1) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
    tdSql.checkRows(500)
    tdSql.checkCols(4)
    tdSql.query("select ts as t, diff(c1) from t1")
    tdSql.query("select ts as t, diff(c1) from t1")
    tdSql.error("select ts as t, diff(c1) from stb1")
    tdSql.query("select ts as t, diff(c2) from t1")
    tdSql.checkRows(5)
    tdSql.error("select ts as t, diff(c3) from t1")
    tdSql.error("select ts as t, diff(c4) from t1")
    tdSql.query("select ts as t, diff(c5) from t1")
    tdSql.checkRows(5)
    tdSql.error("select ts as t, diff(c6) from t1")
    tdSql.error("select ts as t, diff(t1) from t1")
    tdSql.error("select ts as t, diff(c1, c2) from t1")

    tdSql.error("select ts as t, bottom(c1, 0) from t1")
    tdSql.query("select ts as t, bottom(c1, 5) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, bottom(c1, 5) from stb1")
    tdSql.checkRows(5)
    tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
    tdSql.checkRows(500)
    tdSql.query("select ts as t, bottom(c1, 8) from t1")
    tdSql.checkRows(6)
    tdSql.query("select ts as t, bottom(c2, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, bottom(c3, 5) from t1")
    tdSql.error("select ts as t, bottom(c4, 5) from t1")
    tdSql.query("select ts as t, bottom(c5, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, bottom(c6, 5) from t1")
    tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
    tdSql.error("select ts as t, bottom(t1, 1) from t1")
    tdSql.error("select ts as t, bottom(t1, 1) from stb1")
    tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
    tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")

    tdSql.error("select ts as t, top(c1, 0) from t1")
    tdSql.query("select ts as t, top(c1, 5) from t1")
    tdSql.checkRows(5)
    tdSql.checkCols(3)
    for i in range(5):
        data=tdSql.getData(i, 0)
        tdSql.checkData(i, 1, data)
    tdSql.query("select ts as t, top(c1, 5) from stb1")
    tdSql.checkRows(5)
    tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
    tdSql.checkRows(500)
    tdSql.query("select ts as t, top(c1, 8) from t1")
    tdSql.checkRows(6)
    tdSql.query("select ts as t, top(c2, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, top(c3, 5) from t1")
    tdSql.error("select ts as t, top(c4, 5) from t1")
    tdSql.query("select ts as t, top(c5, 8) from t1")
    tdSql.checkRows(6)
    tdSql.error("select ts as t, top(c6, 5) from t1")
    tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
    tdSql.error("select ts as t, top(t1, 1) from t1")
    tdSql.error("select ts as t, top(t1, 1) from stb1")
    tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
    tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
    pass
+
def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
    """Build an apercentile SELECT statement from its parts.

    :param col: column name to aggregate (string)
    :param p: percentile in [0, 100]
    :param com: separator emitted between p and algo (normally ',', or '' to omit algo)
    :param algo: algorithm argument as it should appear in SQL, quotes included
                 (e.g. "'t-digest'"); pass '' together with com='' to omit it
    :param alias: extra select-list text (e.g. an alias or ', count(*)')
    :param table_expr: table name, stable name, or a parenthesized subquery
    :param condition: trailing clause text (where/limit/group by, may be empty)
    :return: the assembled SQL string, e.g. with all defaults:
             "select apercentile(c1, 0, 't-digest')  from t1 "
    """
    # Assemble the function call first, then splice it into the full statement.
    func_call = f"apercentile({col}, {p}{com} {algo})"
    return f"select {func_call} {alias} from {table_expr} {condition}"
+
def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
    """Validate an apercentile query (built by apercentile_query_form) against
    a reference: exact percentile() per table for grouped queries, or
    numpy.percentile over the raw column otherwise. A result within 2% of the
    column spread of the reference, or within the deviation ratio checked by
    tdSql.checkDeviaRation, is accepted. Parameters mirror
    apercentile_query_form; `p` is checked together with 0/40/60/100."""

    # Empty source data: the apercentile query must also return zero rows.
    tdSql.query(f"select count({col}) from {table_expr} {condition}")
    if tdSql.queryRows == 0:
        tdSql.query(self.apercentile_query_form(
            col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
        ))
        tdSql.checkRows(0)
        return

    # Always probe the boundary/midrange percentiles in addition to p.
    pset = [0, 40, 60, 100]
    if p not in pset:
        pset.append(p)

    # Column spread is the scale used for the closeness tolerance below.
    if "stb" in table_expr:
        tdSql.query(f"select spread({col}) from stb1")
    else:
        tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
    spread_num = tdSql.getData(0, 0)

    for pi in pset:

        if "group" in condition:
            # Grouped query: compare each group's apercentile row against an
            # exact percentile() computed on that group's child table.
            tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
            query_result = tdSql.queryResult
            query_rows = tdSql.queryRows
            for i in range(query_rows):
                # Rewrite the grouped condition into a per-table condition.
                pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
                tbname = query_result[i][-1]
                tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
                print(tdSql.sql)
                pre_data = tdSql.getData(0, 0)
                tdSql.query(self.apercentile_query_form(
                    col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
                ))
                if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
                    tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
                else:
                    # Near-zero results: compare absolute error scaled by 2% of spread.
                    # NOTE(review): the log text says "expect deviation: 0.01" but the
                    # accepted threshold here is 0.5 — confirm which is intended.
                    devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
                    if devia < 0.5:
                        tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
                                   f"actual deviation:{devia} <= expect deviation: 0.01")
                    else:
                        tdLog.exit(
                            f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
                            f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
                            f"actual deviation:{devia} > expect deviation: 0.01")

            # if "group" in condition:
            #     tdSql.query(self.apercentile_query_form(
            #         col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
            #     ))
            #     query_result = tdSql.queryResult
            #     query_rows = tdSql.queryRows
            #     tdSql.query(self.apercentile_query_form(
            #         col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
            #     ))
            #     for i in range(query_rows):
            #         if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
            #             tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
            #         else:
            #             devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
            #             if devia < 0.5:
            #                 tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
            #                            f"actual deviation:{devia} <= expect deviation: 0.01")
            #             else:
            #                 tdLog.exit(
            #                     f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
            #                     f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
            #                     f"actual deviation:{devia} > expect deviation: 0.01")

        else:
            # Ungrouped query: fetch reference values for np.percentile.
            if ',' in alias or not alias:
                tdSql.query(f"select {col} from {table_expr} {condition}")
            elif "stb" not in table_expr:
                tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
            else:
                tdSql.query(self.apercentile_query_form(
                    col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
                ))
            # Drop SQL nulls before computing the reference percentile.
            query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
            tdSql.query(self.apercentile_query_form(
                col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
            ))

            if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
                tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
            else:
                # Same near-zero handling as the grouped branch (see note above
                # about the 0.01-vs-0.5 discrepancy in the log text).
                devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
                if devia < 0.5:
                    tdLog.info(
                        f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
                        f"actual deviation:{devia} <= expect deviation: 0.01")
                else:
                    tdLog.exit(
                        f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
                        f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
                        f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+ def apercentile_query(self):
+ """Positive-path checks for apercentile(): per column type, per algorithm,
+ mixed with aggregate/selector/computing functions, and with every supported
+ clause (where/join/group by/slimit/order by/limit). Delegates to checkapert()."""
+
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+ # case1: int col
+ self.checkapert()
+ # case2: float col
+ case2 = {'col':'c2'}
+ self.checkapert(**case2)
+ # case3: double col
+ case3 = {'col':'c5'}
+ self.checkapert(**case3)
+ # case4: bigint col
+ case4 = {'col':'c7'}
+ self.checkapert(**case4)
+ # case5: smallint col
+ case5 = {'col':'c8'}
+ self.checkapert(**case5)
+ # case6: tinyint col
+ case6 = {'col':'c9'}
+ self.checkapert(**case6)
+ # case7: stable
+ case7 = {'table_expr':'stb1'}
+ self.checkapert(**case7)
+ # case8: nest query, outquery
+ case8 = {'table_expr':'(select c1 from t1)'}
+ self.checkapert(**case8)
+ # case9: nest query, inquery and out query
+ case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+ self.checkapert(**case9)
+
+ # case10: nest query, inquery
+ # checked directly (not via checkapert): compare against numpy percentile of raw data
+ tdSql.query("select * from (select c1 from stb1)")
+ if tdSql.queryRows == 0:
+ tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+ tdSql.checkRows(0)
+ else:
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+ tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+ tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+ # case11: no algorithm = algo:0
+ case11 = {'com':'', 'algo': ''}
+ self.checkapert(**case11)
+
+ # case12: p given as a binary literal; case13: upper-case algorithm name;
+ # case14: p given as a hex literal with upper-case "DEFAULT"
+ case12 = {'p': 0b1100100}
+ self.checkapert(**case12)
+ case13 = {'algo':'"T-DIGEST"'}
+ self.checkapert(**case13)
+ case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+ self.checkapert(**case14)
+
+ # case15~21: mix with aggregate function
+ case15 = {'alias':', count(*)'}
+ self.checkapert(**case15)
+ case16 = {'alias':', avg(c1)'}
+ self.checkapert(**case16)
+ case17 = {'alias':', twa(c1)'}
+ self.checkapert(**case17)
+ case18 = {'alias':', irate(c1)'}
+ self.checkapert(**case18)
+ case19 = {'alias':', sum(c1)'}
+ self.checkapert(**case19)
+ case20 = {'alias':', stddev(c1)'}
+ self.checkapert(**case20)
+ case21 = {'alias':', leastsquares(c1, 1, 1)'}
+ self.checkapert(**case21)
+
+ # case22~27:mix with selector function
+ case22 = {'alias':', min(c1)'}
+ self.checkapert(**case22)
+ case23 = {'alias':', max(c1)'}
+ self.checkapert(**case23)
+ case24 = {'alias':', first(c1)'}
+ self.checkapert(**case24)
+ case25 = {'alias':', last(c1)'}
+ self.checkapert(**case25)
+ case26 = {'alias':', percentile(c1, 0)'}
+ self.checkapert(**case26)
+ case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+ self.checkapert(**case27)
+
+ # case28~29: mix with computing function
+ case28 = {'alias':', spread(c1)'}
+ self.checkapert(**case28)
+ # case29: mix with four operation
+ case29 = {'alias':'+ spread(c1)'}
+ self.checkapert(**case29)
+
+ # case30~36: with condition
+ case30 = {'condition':'where ts > now'}
+ self.checkapert(**case30)
+ case31 = {'condition':'where c1 between 1 and 200'}
+ self.checkapert(**case31)
+ case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+ self.checkapert(**case32)
+ case33 = {'condition':'where c1>100 and c2<100'}
+ self.checkapert(**case33)
+ case34 = {'condition':'where c1 is not null'}
+ self.checkapert(**case34)
+ case35 = {'condition':'where c4 like "_inary%"'}
+ self.checkapert(**case35)
+ case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+ self.checkapert(**case36)
+
+ # case37~38: with join
+ case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+ self.checkapert(**case37)
+ case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+ self.checkapert(**case38)
+
+ # case39: with group by
+ case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+ self.checkapert(**case39)
+
+ # case40: with slimit
+ case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+ self.checkapert(**case40)
+
+ # case41: with soffset
+ case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+ self.checkapert(**case41)
+
+ # case42~43: with order by
+ case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+ self.checkapert(**case42)
+ case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+ self.checkapert(**case43)
+
+ # case44~45: with limit offset
+ case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+ self.checkapert(**case44)
+ case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+ self.checkapert(**case45)
+
+ pass
+
+ def error_apercentile(self):
+
+ # unusual test
+ #
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+ #
+ # form test
+ tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm
+ tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm
+ tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm
+ tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm
+ tdSql.error("apercentile( c1, 100) from t1") # no select
+ tdSql.error("select apercentile from t1") # no algorithm condition
+ tdSql.error("select apercentile c1,0 from t1") # no brackets
+ tdSql.error("select apercentile (c1,0) t1") # no from
+ tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm
+ tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr
+ tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1
+ tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2
+ tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3
+ tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5
+ tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6
+ tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7
+ tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8
+ tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9
+ tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10
+ tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11
+ tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12
+ tdSql.error("select apercentile(c1) from t1") # agrs: 1
+ tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # agrs: 4
+ tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # agrs: 4
+ tdSql.error("select apercentile() from t1") # agrs: null 1
+ tdSql.error("select apercentile from t1") # agrs: null 2
+ tdSql.error("select apercentile( , , ) from t1") # agrs: null 3
+ tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # agrs: null 4
+ tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column
+ tdSql.error(self.apercentile_query_form(col=123)) # col:numerical
+ tdSql.error(self.apercentile_query_form(col=True)) # col:bool
+ tdSql.error(self.apercentile_query_form(col='')) # col:''
+ tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr
+ tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical
+ tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp
+ tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary
+ tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool
+ tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar
+ tdSql.error(self.apercentile_query_form(p=True)) # p:bool
+ tdSql.error(self.apercentile_query_form(p='a')) # p:str
+ tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr
+ tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp
+ tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool
+ tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:bool
+ tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp
+ tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr
+
+ # boundary test
+ tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint
+ tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint
+
+ # mix function test
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with bottom function
+ tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function
+ tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function
+ tdSql.error(self.apercentile_query_form(alias=', *')) # mix with *
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function
+ tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation
+
+ def apercentile_data(self, tbnum, data_row, basetime):
+ for i in range(tbnum):
+ for j in range(data_row):
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+ f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+ f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+ )
+
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+ f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+ f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+ )
+ tdSql.execute(
+ f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ )
+
+ pass
+
+ def td6108(self):
+ """TD-6108: run the apercentile positive/negative suites in four states:
+ empty tables, populated tables, tables containing NULL rows, and after
+ a dnode restart (data recovered from WAL)."""
+ tdLog.printNoPrefix("==========TD-6108==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create stable db.stb1 (\
+ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+ ) \
+ tags(st1 int)"
+ )
+ tdSql.execute(
+ "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ )
+ tbnum = 10
+ for i in range(tbnum):
+ tdSql.execute(f"create table t{i} using stb1 tags({i})")
+ tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+ tdLog.printNoPrefix("######## no data test:")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data test:")
+ nowtime = int(round(time.time() * 1000))
+ per_table_rows = 1000
+ self.apercentile_data(tbnum, per_table_rows, nowtime)
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data with NULL test:")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ # restart the dnode so queries run against data replayed from the WAL
+ tdLog.printNoPrefix("######## check after WAL test:")
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+
+ self.apercentile_query()
+ self.error_apercentile()
+
+ def run(self):
+ """Test entry point; currently only TD-3690 is enabled — the remaining
+ cases are intentionally commented out."""
+
+ # master branch
+ self.td3690()
+
+ # self.td5168()
+ # self.td5433()
+ # self.td5798()
+
+ # develop branch
+ # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+ def stop(self):
+ """Release the SQL connection and report success."""
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+# register this test case with the framework for both platforms
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td4082.py b/tests/pytest/functions/queryTestCases-td4082.py
new file mode 100644
index 0000000000000000000000000000000000000000..73f03530a4db222b199352ec582617db394f34dd
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td4082.py
@@ -0,0 +1,1586 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ """Framework hook: bind the shared tdSql helper to this connection's cursor."""
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ """Return the cfg directory of dnode1 under the sim tree, accounting for
+ the community-repo layout."""
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ """Return the full path of dnode1's taos.cfg."""
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ """TD-3690: verify the default value of offlineThreshold (864000s) as
+ reported by `show variables`. The row index is located by name because
+ variable ordering is not guaranteed."""
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ # find the row index of the "offlineThreshold" variable
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ """TD-4082: check compressMsgSize behavior — default is -1; a valid value
+ appended to taos.cfg takes effect after restart; an out-of-range value
+ falls back to -1; finally the appended line is removed."""
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ # default value is -1 (compression disabled)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ # NOTE(review): 'compressMSgSize' casing differs from 'compressMsgSize';
+ # the test expects it to take effect, implying case-insensitive cfg keys — confirm
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ tdDnodes.stop(index)
+ # bump the value past the allowed maximum; server should reset to -1
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdDnodes.stop(index)
+ # remove the appended cfg line to restore the original file
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ """TD-4097: exhaustive `show create database/stable/table` checks in three
+ phases: without a current database, after `use db`, and after dropping
+ tables/stables/databases. Cleans up all databases at the end."""
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+ # insert data, then exercise the `show create` statements
+
+ # p1: without switching into a specific database
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2: after switching into database db (unqualified names become valid)
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3: query again after dropping tables / stables / databases
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ # cleanup: drop every database created for this case
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ """TD-4153: placeholder — test body not implemented yet."""
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ """TD-4288: validate the `keep` database option — default 3650, rejection
+ of out-of-range / malformed values, and `alter database ... keep`.
+ The expected `show databases` format differs between community and
+ enterprise builds ("k,k,k" string vs. single int)."""
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.query("show databases")
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ days = tdSql.getData(0, 6)
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ """TD-4724: with min/maxTablesPerVnode and maxVgroupsPerDb appended to
+ taos.cfg, creating 100 tables must spread them across more than one
+ vnode; cfg lines are removed again afterwards."""
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ # append the three tuning parameters to taos.cfg before restart
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ # strip the three appended lines (one '$d' per line)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ """TD-4889: deploy a dnode with small file-block limits, load 1000 tables
+ x 260 rows, then trigger `compact vnodes` and poll `show vgroups` until
+ the compacting flag (col 6) becomes non-zero; fail after ~3s."""
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ # compacting flag must be 0 before the compact is requested
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compacting not occured")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ """Helper for TD-5168 (lossy compression): rebuild db.t1 with 4 float/double
+ columns, insert 5 deterministic baseline rows at fixed timestamps, then
+ 10000 random rows in the narrow range (1000, 1001)."""
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ # 5 baseline rows with known values, re-checked after each restart
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # bulk rows in a narrow value range so lossy compression has visible effect
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # 插入小范围内的随机数
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # 获取五个时间点的数据作为基准数值,未压缩情况下精准匹配
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # 关闭服务并获取未开启压缩情况下的数据容量
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # 开启有损压缩,参数float,并启动服务插入数据
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为float情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数double,并启动服务
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为double情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数 float&&double ,并启动服务
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ # TD-5433 regression test: SELECT DISTINCT on super-table tag columns with
+ # =/!= filters, comparing numeric tags (tinyint/int on stb1) against
+ # binary tags (stb2). Rebuilds database `db` from scratch on every run.
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ # 20000 child tables of stb1: t0 cycles over 0..127, t1 is unique (100+i),
+ # three rows per table. NOTE(review): 60k synchronous inserts — slow setup.
+ numtab=20000
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ # Five child tables of stb2 with distinct binary tag values '1'..'5' / '100'..'500'.
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ # int tag t1: unique per table, so != one value -> numtab-1 rows, = -> 1 row.
+ # Quoted and unquoted comparison values must behave identically.
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ # tinyint tag t0: values cycle 0..127, so 128 distinct values overall.
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ # binary tag t1 on stb2: five distinct values; numeric literals must match
+ # binary tags the same way quoted literals do.
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ # binary tag t0 on stb2: same pattern as t1 above.
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ # Combined regression test:
+ # TD-5810 - SELECT DISTINCT over multiple data columns (stb1/stb2 + child tables)
+ # TD-5798 - SELECT DISTINCT over multiple tag columns
+ # Also asserts a long list of queries that must be rejected (tdSql.error).
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ # Tags cycle modulo maxRemainderNum (7); each stb1 child gets 3 data rows
+ # plus one all-NULL-columns row (ts only). Same layout mirrored for stb2
+ # with binary tags and a binary data column.
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ # One extra child table per stable with all-NULL tags, to exercise
+ # DISTINCT over NULL tag values.
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+ #========== TD-5810: support DISTINCT on multiple data columns ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ # Same family of queries against a single child table (t1).
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ # DISTINCT over a binary data column (stb2.c3).
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ # Invalid DISTINCT-on-columns forms that the parser must reject.
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ # DISTINCT over subquery results (nested select).
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+ #========== TD-5798: support DISTINCT on multiple tag columns ==========
+ # Expected row counts follow from the modulo-7 tag layout above, plus one
+ # extra row for the all-NULL-tag child table.
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+ def td5935(self):
+ # Regression test for TD-5935 (plus TD-5933 / TD-5978 checks below):
+ # builds 100 child tables with 1000 rows each at 10ms spacing ending at
+ # "now", plus one trailing NULL-columns row at nowtime-10000.
+ tdLog.printNoPrefix("==========TD-5935==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+ # Millisecond epoch used as the newest timestamp; rows step back 10ms apiece.
+ nowtime=int(round((time.time()*1000)))
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+ tdSql.execute(sql)
+ for j in range(1000):
+ tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+ tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+ ########### TD-5933: interval query must not return 0 rows ##########
+ # NOTE(review): the original comment mentions stddev but the query uses
+ # last(*) — confirm which function TD-5933 actually covers.
+ stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+ tdSql.query(stddevAndIntervalSql)
+ tdSql.checkRows(10)
+
+ ########## TD-5978: fill(next) must not produce 0 when the start row is NULL ##########
+ fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+ tdSql.query(fillsql)
+ fillResult=False
+ # First filled row must carry a real (non-zero, non-NULL) value from the next row.
+ if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+ fillResult=True
+ if fillResult:
+ tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+ else:
+ tdLog.exit("fill(next) is wrong")
+
+ pass
+
+ def td6068(self):
+ # Regression test for TD-6068: when `ts` is selected with an alias next to
+ # derivative/diff/bottom/top, the timestamp column must track the function's
+ # own row timestamps. The whole query battery is run twice — once on fresh
+ # data and once after a dnode restart — to verify results survive reload.
+ tdLog.printNoPrefix("==========TD-6068==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+ # 100 child tables; 6 data rows each (hourly) plus 3 ts-only NULL rows.
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+ # derivative: aliased ts (col 0) must equal the function's ts (col 1).
+ tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+ tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+ tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+ tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+ tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+ # diff: numeric columns allowed; timestamp/binary/bool/tag columns rejected.
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ # NOTE(review): the next two lines repeat the same query with no checks —
+ # presumably intentional re-execution; confirm or remove upstream.
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ # bottom: k must be >= 1; k larger than the 6 data rows returns all 6.
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ # top: mirror image of the bottom() battery above.
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+ # Restart the dnode and repeat every check against reloaded data.
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+ pass
+
+ def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+ '''
+ Build an apercentile query string (no execution here).
+ :param col: string, column name, required parameter;
+ :param p: float, percentile value in [0,100], required parameter;
+ :param com: string, separator placed between p and algo (normally ',');
+ :param algo: string, algorithm argument as it appears in SQL, e.g. "'t-digest'";
+ :param alias: string, optional result-column alias clause;
+ :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter;
+ :param condition: expression appended after the FROM clause (where/group/limit...);
+ :param args: other functions, like: ', last(col)'
+ :return: apercentile query statement; with defaults: select apercentile(c1, 0, 't-digest')  from t1
+ '''
+
+ return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+ def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+
+ # Validate apercentile() results against reference values.
+ # Empty source -> expect 0 rows. Otherwise, for each percentile in
+ # {0, 40, 60, 100, p}: with GROUP BY, compare per-table against
+ # percentile(); without, compare against numpy.percentile of the raw
+ # column. Deviations are judged relative to the column spread: within
+ # 10% when the result is >= 2% of spread, else within 0.5 of
+ # (spread * 0.02) absolute.
+ tdSql.query(f"select count({col}) from {table_expr} {condition}")
+ if tdSql.queryRows == 0:
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ tdSql.checkRows(0)
+ return
+
+ # Percentile sample points; the caller's p is appended if not already covered.
+ pset = [0, 40, 60, 100]
+ if p not in pset:
+ pset.append(p)
+
+ # Column spread used to scale the tolerance; for non-stable sources the
+ # per-table spreads are averaged over the stable's children.
+ if "stb" in table_expr:
+ tdSql.query(f"select spread({col}) from stb1")
+ else:
+ tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+ spread_num = tdSql.getData(0, 0)
+
+ for pi in pset:
+
+ if "group" in condition:
+ # Per-group path: derive each group's table name from last_row(),
+ # compute the exact percentile() per table as the reference, then
+ # compare row i of the grouped apercentile result against it.
+ tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+ query_result = tdSql.queryResult
+ query_rows = tdSql.queryRows
+ for i in range(query_rows):
+ # Rewrite the grouped condition into a per-table one:
+ # slimit->limit, drop "group by tbname", cut at "soffset".
+ pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+ tbname = query_result[i][-1]
+ tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+ print(tdSql.sql)
+ pre_data = tdSql.getData(0, 0)
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+ ))
+ if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+ tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+ else:
+ # Small results: use an absolute tolerance scaled by spread.
+ devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+ if devia < 0.5:
+ tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+ f"actual deviation:{devia} <= expect deviation: 0.01")
+ else:
+ tdLog.exit(
+ f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+ f"actual deviation:{devia} > expect deviation: 0.01")
+
+ # if "group" in condition:
+ # tdSql.query(self.apercentile_query_form(
+ # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+ # ))
+ # query_result = tdSql.queryResult
+ # query_rows = tdSql.queryRows
+ # tdSql.query(self.apercentile_query_form(
+ # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+ # ))
+ # for i in range(query_rows):
+ # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+ # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+ # else:
+ # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+ # if devia < 0.5:
+ # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+ # f"actual deviation:{devia} <= expect deviation: 0.01")
+ # else:
+ # tdLog.exit(
+ # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+ # f"actual deviation:{devia} > expect deviation: 0.01")
+
+ else:
+ # Ungrouped path: pick the reference query depending on whether an
+ # alias/plain column, a plain-table percentile(), or a "default"-algo
+ # apercentile() is the appropriate baseline.
+ if ',' in alias or not alias:
+ tdSql.query(f"select {col} from {table_expr} {condition}")
+ elif "stb" not in table_expr:
+ tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+ else:
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+ ))
+ # Drop NULLs before computing the numpy reference percentile.
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+ ))
+
+ if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+ else:
+ devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+ if devia < 0.5:
+ tdLog.info(
+ f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+ f"actual deviation:{devia} <= expect deviation: 0.01")
+ else:
+ tdLog.exit(
+ f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+ f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+ def apercentile_query(self):
+
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+ # case1: int col
+ self.checkapert()
+ # case2: float col
+ case2 = {'col':'c2'}
+ self.checkapert(**case2)
+ # case3: double col
+ case3 = {'col':'c5'}
+ self.checkapert(**case3)
+ # case4: bigint col
+ case4 = {'col':'c7'}
+ self.checkapert(**case4)
+ # case5: smallint col
+ case5 = {'col':'c8'}
+ self.checkapert(**case5)
+ # case6: tinyint col
+ case6 = {'col':'c9'}
+ self.checkapert(**case6)
+ # case7: stable
+ case7 = {'table_expr':'stb1'}
+ self.checkapert(**case7)
+ # case8: nest query, outquery
+ case8 = {'table_expr':'(select c1 from t1)'}
+ self.checkapert(**case8)
+ # case9: nest query, inquery and out query
+ case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+ self.checkapert(**case9)
+
+ # case10: nest query, inquery
+ tdSql.query("select * from (select c1 from stb1)")
+ if tdSql.queryRows == 0:
+ tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+ tdSql.checkRows(0)
+ else:
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+ tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+ tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+ # case11: no algorithm = algo:0
+ case11 = {'com':'', 'algo': ''}
+ self.checkapert(**case11)
+
+        # case12~14: p given as binary/hex integer literals, and the algorithm keyword in uppercase
+ case12 = {'p': 0b1100100}
+ self.checkapert(**case12)
+ case13 = {'algo':'"T-DIGEST"'}
+ self.checkapert(**case13)
+ case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+ self.checkapert(**case14)
+
+ # case15~21: mix with aggregate function
+ case15 = {'alias':', count(*)'}
+ self.checkapert(**case15)
+ case16 = {'alias':', avg(c1)'}
+ self.checkapert(**case16)
+ case17 = {'alias':', twa(c1)'}
+ self.checkapert(**case17)
+ case18 = {'alias':', irate(c1)'}
+ self.checkapert(**case18)
+ case19 = {'alias':', sum(c1)'}
+ self.checkapert(**case19)
+ case20 = {'alias':', stddev(c1)'}
+ self.checkapert(**case20)
+ case21 = {'alias':', leastsquares(c1, 1, 1)'}
+ self.checkapert(**case21)
+
+ # case22~27:mix with selector function
+ case22 = {'alias':', min(c1)'}
+ self.checkapert(**case22)
+ case23 = {'alias':', max(c1)'}
+ self.checkapert(**case23)
+ case24 = {'alias':', first(c1)'}
+ self.checkapert(**case24)
+ case25 = {'alias':', last(c1)'}
+ self.checkapert(**case25)
+ case26 = {'alias':', percentile(c1, 0)'}
+ self.checkapert(**case26)
+ case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+ self.checkapert(**case27)
+
+ # case28~29: mix with computing function
+ case28 = {'alias':', spread(c1)'}
+ self.checkapert(**case28)
+ # case29: mix with four operation
+ case29 = {'alias':'+ spread(c1)'}
+ self.checkapert(**case29)
+
+ # case30~36: with condition
+ case30 = {'condition':'where ts > now'}
+ self.checkapert(**case30)
+ case31 = {'condition':'where c1 between 1 and 200'}
+ self.checkapert(**case31)
+ case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+ self.checkapert(**case32)
+ case33 = {'condition':'where c1>100 and c2<100'}
+ self.checkapert(**case33)
+ case34 = {'condition':'where c1 is not null'}
+ self.checkapert(**case34)
+ case35 = {'condition':'where c4 like "_inary%"'}
+ self.checkapert(**case35)
+ case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+ self.checkapert(**case36)
+
+ # case37~38: with join
+ case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+ self.checkapert(**case37)
+ case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+ self.checkapert(**case38)
+
+ # case39: with group by
+ case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+ self.checkapert(**case39)
+
+ # case40: with slimit
+ case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+ self.checkapert(**case40)
+
+ # case41: with soffset
+ case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+ self.checkapert(**case41)
+
+ # case42: with order by
+ case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+ self.checkapert(**case42)
+ case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+ self.checkapert(**case43)
+
+ # case44: with limit offset
+ case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+ self.checkapert(**case44)
+ case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+ self.checkapert(**case45)
+
+ pass
+
+ def error_apercentile(self):
+
+ # unusual test
+ #
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+ #
+ # form test
+ tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm
+ tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm
+ tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm
+ tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm
+ tdSql.error("apercentile( c1, 100) from t1") # no select
+ tdSql.error("select apercentile from t1") # no algorithm condition
+ tdSql.error("select apercentile c1,0 from t1") # no brackets
+ tdSql.error("select apercentile (c1,0) t1") # no from
+ tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm
+ tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr
+ tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1
+ tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2
+ tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3
+ tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5
+ tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6
+ tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7
+ tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8
+ tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9
+ tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10
+ tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11
+ tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")                   # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")  # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")  # args: 4
+        tdSql.error("select apercentile() from t1")                     # args: null 1
+        tdSql.error("select apercentile from t1")                       # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")                # args: null 3
+ tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # agrs: null 4
+ tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column
+ tdSql.error(self.apercentile_query_form(col=123)) # col:numerical
+ tdSql.error(self.apercentile_query_form(col=True)) # col:bool
+ tdSql.error(self.apercentile_query_form(col='')) # col:''
+ tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr
+ tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical
+ tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp
+ tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary
+ tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool
+ tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar
+ tdSql.error(self.apercentile_query_form(p=True)) # p:bool
+ tdSql.error(self.apercentile_query_form(p='a')) # p:str
+ tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr
+ tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp
+ tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=0b1))                          # algorithm: int (binary literal)
+        tdSql.error(self.apercentile_query_form(algo=0x1))                          # algorithm: int (hex literal)
+        tdSql.error(self.apercentile_query_form(algo=0o1))                          # algorithm: int (octal literal)
+ tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool
+ tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:bool
+ tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp
+ tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr
+
+ # boundary test
+ tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint
+ tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809))        # algorithm left out of bigint
+ tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint
+
+ # mix function test
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))              # mix with bottom function (NOTE(review): duplicates the top(c1,1) case above; bottom(c1,1) was likely intended — confirm)
+ tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function
+ tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function
+ tdSql.error(self.apercentile_query_form(alias=', *')) # mix with *
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function
+ tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation
+
+ def apercentile_data(self, tbnum, data_row, basetime):
+ for i in range(tbnum):
+ for j in range(data_row):
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+ f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+ f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+ )
+
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+ f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+ f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+ )
+ tdSql.execute(
+ f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ )
+
+ pass
+
+ def td6108(self):
+ tdLog.printNoPrefix("==========TD-6108==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create stable db.stb1 (\
+ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+ ) \
+ tags(st1 int)"
+ )
+ tdSql.execute(
+ "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ )
+ tbnum = 10
+ for i in range(tbnum):
+ tdSql.execute(f"create table t{i} using stb1 tags({i})")
+ tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+ tdLog.printNoPrefix("######## no data test:")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data test:")
+ nowtime = int(round(time.time() * 1000))
+ per_table_rows = 1000
+ self.apercentile_data(tbnum, per_table_rows, nowtime)
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data with NULL test:")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## check after WAL test:")
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+
+ self.apercentile_query()
+ self.error_apercentile()
+
+
+ def run(self):
+
+ self.td4082()
+ # self.td5168()
+ # self.td5433()
+ # self.td5798()
+
+ # develop branch
+ # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td4097.py b/tests/pytest/functions/queryTestCases-td4097.py
new file mode 100644
index 0000000000000000000000000000000000000000..99c5f569825f631c9401a8db9994263834b30389
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td4097.py
@@ -0,0 +1,1587 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+        # insert data, then exercise the various "show create" statements
+
+        # p1: without switching into a specific database
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+        # p2: after switching into the specified database
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+        # p3: query again after dropping tables and databases
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.query("show databases")
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ days = tdSql.getData(0, 6)
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compacting not occured")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ # Fixture for td5168(): recreate database `db` with one table t1 under
+ # stable stb1 (two float + two double columns), insert five rows at
+ # fixed 2021-07-01 timestamps (deterministic baseline values), then
+ # 10000 rows of random values in the narrow range [1000, 1001] so that
+ # lossy compression has a measurable effect on the data size.
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ # five deterministic rows; per-row step 0.1, per-column step 0.01
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # bulk random rows at millisecond offsets from now
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ # TD-5168: lossy compression for float/double columns.  Inserts a
+ # baseline dataset, then re-runs the insert with lossyColumns set to
+ # float, double and float|double, checking (a) queried values at five
+ # fixed timestamps still match the baseline and (b) the on-disk vnode
+ # directory shrinks in every lossy mode compared to no compression.
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # insert random values within a small range
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # capture the rows at the five fixed timestamps as baseline values;
+ # without compression they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # NOTE(review): writing into locals() inside a function is
+ # CPython-implementation-dependent; the "fNM" keys survive here
+ # only because they are not real fast locals — a plain dict
+ # would be safer.  Confirm before relying on this elsewhere.
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # stop the service and measure the data size with lossy compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ # shell helpers: read the current lossyColumns setting / measure the
+ # vnode data directory size (du output is in KiB blocks)
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ # append dPrecision once; the later `sed '$d'` commands delete the last
+ # config line before appending the next lossyColumns value
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # enable lossy compression for float columns, start the service and insert data
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five fixed timestamps again and compare with the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=float
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to double columns and restart the service
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five fixed timestamps again and compare with the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=double
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to float|double and restart the service
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five fixed timestamps again and compare with the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=float|double
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ # every lossy mode must produce a smaller data directory than baseline
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ # TD-5433: DISTINCT on a single tag column must honor = / != filters,
+ # for numeric tags (stb1: tinyint t0, int t1) and for binary tags
+ # (stb2) where the filter value may be quoted or unquoted.
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ # 20000 child tables: t0 cycles over 0..127 (128 distinct values),
+ # t1 = 100+i is unique per table (numtab distinct values)
+ numtab=20000
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ # five tables on the binary-tag stable; tag values are numeric strings
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ # int tag t1: != excludes exactly one of the numtab distinct values,
+ # quoted and unquoted literals must behave the same
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ # tinyint tag t0: 128 distinct values in total
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ # binary tag t1 on stb2: 5 distinct values
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ # binary tag t0 on stb2: 5 distinct values
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ # TD-5798 + TD-5810: DISTINCT over multiple data columns (TD-5810) and
+ # over multiple tag columns (TD-5798).  Builds stb1 (numeric tags) and
+ # stb2 (binary tags) with 100 regular child tables each plus one table
+ # with all-NULL tags; each table gets 3 data rows and one ts-only row.
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ # tags cycle modulo maxRemainderNum, so distinct tag counts below are
+ # maxRemainderNum (+1 for the NULL-tag table)
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+ #========== TD-5810 support distinct on multiple data columns ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ # unsupported/invalid DISTINCT forms on data columns must be rejected
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ # DISTINCT over subquery results
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+ #========== TD-5798 support distinct on multiple tag columns ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+ def td5935(self):
+ # TD-5935 (covers TD-5933 and TD-5978): 100 child tables with 1000
+ # rows each at 10 ms spacing, plus one ts-only (NULL data) row at
+ # nowtime-10000 per table, then interval/fill regression checks.
+ tdLog.printNoPrefix("==========TD-5935==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+ nowtime=int(round((time.time()*1000)))
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+ tdSql.execute(sql)
+ for j in range(1000):
+ tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+ tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+ ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
+ # NOTE(review): the variable name and comment mention stddev, but the
+ # query uses last(*) — presumably a reduced reproduction of the same
+ # interval bug; confirm against TD-5933 before renaming.
+ stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+ tdSql.query(stddevAndIntervalSql)
+ tdSql.checkRows(10)
+
+ ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
+ # the first window starts on the NULL-only row: fill(next) must carry
+ # the next real value forward, so column 2 is neither 0 nor None
+ fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+ tdSql.query(fillsql)
+ fillResult=False
+ if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+ fillResult=True
+ if fillResult:
+ tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+ else:
+ tdLog.exit("fill(next) is wrong")
+
+ pass
+
+ def td6068(self):
+ tdLog.printNoPrefix("==========TD-6068==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+ tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+ tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+ tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+ tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+ tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+ pass
+
+ def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+ '''
+ Build an apercentile query statement (pure string formatting, no DB access).
+ :param col: string, column name, required parameter;
+ :param p: float, percentile value in [0,100], required parameter;
+ :param com: string, separator emitted between p and algo (default ','; pass '' together with algo='' to omit the algorithm argument);
+ :param algo: string, algorithm argument exactly as it should appear in SQL, e.g. "'t-digest'" or '1' (algorithm: {type:int, data:[0, 1]});
+ :param alias: string, optional alias for the result column;
+ :param table_expr: string or expression, data source (e.g. table/stable name, result set), required parameter;
+ :param condition: expression appended after the FROM clause (where/group by/...);
+ :param args: other functions, like: ', last(col)'
+ :return: apercentile query statement; with all defaults:
+ select apercentile(c1, 0, 't-digest')  from t1
+ '''
+
+ return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+    def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+        """Cross-check apercentile() against percentile()/numpy references.
+
+        An empty source table must yield an empty apercentile result set.
+        Otherwise, for each percentile in {0, 40, 60, 100, p}, the apercentile
+        value must either pass checkDeviaRation (10% tolerance) when it is
+        large, or, when it is smaller than 2% of the column spread, deviate
+        from the reference by less than half of that 2%-of-spread scale.
+        """
+
+        # No data: the apercentile query itself must return zero rows.
+        tdSql.query(f"select count({col}) from {table_expr} {condition}")
+        if tdSql.queryRows == 0:
+            tdSql.query(self.apercentile_query_form(
+                col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+            ))
+            tdSql.checkRows(0)
+            return
+
+        # Always probe the boundary and interior percentiles plus the caller's p.
+        pset = [0, 40, 60, 100]
+        if p not in pset:
+            pset.append(p)
+
+        # spread_num sets the scale of the "small value" tolerance below.
+        if "stb" in table_expr:
+            tdSql.query(f"select spread({col}) from stb1")
+        else:
+            tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+        spread_num = tdSql.getData(0, 0)
+
+        for pi in pset:
+
+            if "group" in condition:
+                # Grouped query: validate each sub-table row against a
+                # per-table percentile() query (slimit/soffset are stripped so
+                # the single-table reference query stays valid).
+                tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+                query_result = tdSql.queryResult
+                query_rows = tdSql.queryRows
+                for i in range(query_rows):
+                    pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+                    tbname = query_result[i][-1]
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+                    print(tdSql.sql)
+                    pre_data = tdSql.getData(0, 0)
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                    # Large result: relative deviation check. Small result:
+                    # compare the error against 2% of the spread instead.
+                    if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+                        tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+                    else:
+                        devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+                        if devia < 0.5:
+                            tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                       f"actual deviation:{devia} <= expect deviation: 0.01")
+                        else:
+                            tdLog.exit(
+                                f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                                f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                f"actual deviation:{devia} > expect deviation: 0.01")
+
+            # if "group" in condition:
+            #     tdSql.query(self.apercentile_query_form(
+            #         col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+            #     ))
+            #     query_result = tdSql.queryResult
+            #     query_rows = tdSql.queryRows
+            #     tdSql.query(self.apercentile_query_form(
+            #         col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+            #     ))
+            #     for i in range(query_rows):
+            #         if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+            #             tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+            #         else:
+            #             devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+            #             if devia < 0.5:
+            #                 tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+            #                            f"actual deviation:{devia} <= expect deviation: 0.01")
+            #             else:
+            #                 tdLog.exit(
+            #                     f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+            #                     f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+            #                     f"actual deviation:{devia} > expect deviation: 0.01")
+
+            else:
+                # Ungrouped query: build a reference result set, preferring
+                # percentile() on plain tables, apercentile(default) on stables.
+                if ',' in alias or not alias:
+                    tdSql.query(f"select {col} from {table_expr} {condition}")
+                elif "stb" not in table_expr:
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+                else:
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                # Drop NULLs before computing the numpy reference percentile.
+                query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+                tdSql.query(self.apercentile_query_form(
+                    col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+                ))
+
+                if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+                    tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+                else:
+                    devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+                    if devia < 0.5:
+                        tdLog.info(
+                            f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} <= expect deviation: 0.01")
+                    else:
+                        tdLog.exit(
+                            f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                            f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+    def apercentile_query(self):
+        """Positive apercentile() test matrix: column types, nesting,
+        algorithm spellings, aggregate/selector/computing mixes, filter
+        conditions, joins, group by, slimit/soffset, order by, limit/offset.
+        Each case delegates the actual validation to self.checkapert().
+        """
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+        # case1: int col
+        self.checkapert()
+        # case2: float col
+        case2 = {'col':'c2'}
+        self.checkapert(**case2)
+        # case3: double col
+        case3 = {'col':'c5'}
+        self.checkapert(**case3)
+        # case4: bigint col
+        case4 = {'col':'c7'}
+        self.checkapert(**case4)
+        # case5: smallint col
+        case5 = {'col':'c8'}
+        self.checkapert(**case5)
+        # case6: tinyint col
+        case6 = {'col':'c9'}
+        self.checkapert(**case6)
+        # case7: stable
+        case7 = {'table_expr':'stb1'}
+        self.checkapert(**case7)
+        # case8: nest query, outquery
+        case8 = {'table_expr':'(select c1 from t1)'}
+        self.checkapert(**case8)
+        # case9: nest query, inquery and out query
+        case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+        self.checkapert(**case9)
+
+        # case10: nest query, inquery — checked inline against numpy because
+        # the inner apercentile result cannot go through checkapert directly.
+        tdSql.query("select * from (select c1 from stb1)")
+        if tdSql.queryRows == 0:
+            tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+            tdSql.checkRows(0)
+        else:
+            query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+            tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+            tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+            tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+        # case11: no algorithm = algo:0
+        case11 = {'com':'', 'algo': ''}
+        self.checkapert(**case11)
+
+        # case12~14: p given as bin/hex literal, algorithm name case variants
+        case12 = {'p': 0b1100100}
+        self.checkapert(**case12)
+        case13 = {'algo':'"T-DIGEST"'}
+        self.checkapert(**case13)
+        case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+        self.checkapert(**case14)
+
+        # case15~21: mix with aggregate function
+        case15 = {'alias':', count(*)'}
+        self.checkapert(**case15)
+        case16 = {'alias':', avg(c1)'}
+        self.checkapert(**case16)
+        case17 = {'alias':', twa(c1)'}
+        self.checkapert(**case17)
+        case18 = {'alias':', irate(c1)'}
+        self.checkapert(**case18)
+        case19 = {'alias':', sum(c1)'}
+        self.checkapert(**case19)
+        case20 = {'alias':', stddev(c1)'}
+        self.checkapert(**case20)
+        case21 = {'alias':', leastsquares(c1, 1, 1)'}
+        self.checkapert(**case21)
+
+        # case22~27: mix with selector function
+        case22 = {'alias':', min(c1)'}
+        self.checkapert(**case22)
+        case23 = {'alias':', max(c1)'}
+        self.checkapert(**case23)
+        case24 = {'alias':', first(c1)'}
+        self.checkapert(**case24)
+        case25 = {'alias':', last(c1)'}
+        self.checkapert(**case25)
+        case26 = {'alias':', percentile(c1, 0)'}
+        self.checkapert(**case26)
+        case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+        self.checkapert(**case27)
+
+        # case28~29: mix with computing function
+        case28 = {'alias':', spread(c1)'}
+        self.checkapert(**case28)
+        # case29: mix with four operation
+        case29 = {'alias':'+ spread(c1)'}
+        self.checkapert(**case29)
+
+        # case30~36: with condition
+        case30 = {'condition':'where ts > now'}
+        self.checkapert(**case30)
+        case31 = {'condition':'where c1 between 1 and 200'}
+        self.checkapert(**case31)
+        case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+        self.checkapert(**case32)
+        case33 = {'condition':'where c1>100 and c2<100'}
+        self.checkapert(**case33)
+        case34 = {'condition':'where c1 is not null'}
+        self.checkapert(**case34)
+        case35 = {'condition':'where c4 like "_inary%"'}
+        self.checkapert(**case35)
+        case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+        self.checkapert(**case36)
+
+        # case37~38: with join
+        case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+        self.checkapert(**case37)
+        case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+        self.checkapert(**case38)
+
+        # case39: with group by
+        case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+        self.checkapert(**case39)
+
+        # case40: with slimit
+        case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+        self.checkapert(**case40)
+
+        # case41: with soffset
+        case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+        self.checkapert(**case41)
+
+        # case42: with order by
+        case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44: with limit offset
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+        """Negative apercentile() test matrix: malformed SQL forms, bad
+        column/percentile/algorithm arguments, out-of-range boundaries, and
+        illegal mixes with other functions. Every statement must error.
+        """
+
+        # unusual test
+        #
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+        #
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))             # no col , no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))                            # no col , algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))               # no p , no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))                              # no p , algorithm
+        tdSql.error("apercentile( c1, 100) from t1")                                # no select
+        tdSql.error("select apercentile from t1")                                   # no algorithm condition
+        tdSql.error("select apercentile c1,0 from t1")                              # no brackets
+        tdSql.error("select apercentile (c1,0) t1")                                 # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p , no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")                         # no table_expr
+        tdSql.error("select apercentile{ (c1,0) }  from t1")                        # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ]  from t1")                        # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ]  from t1")                         # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")               # sql form error 5
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")               # sql form error 6
+        tdSql.error("select apercentile(c1, (0), 1) from t1")                       # sql form error 7
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")               # sql form error 8
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")               # sql form error 9
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")               # sql form error 10
+        tdSql.error("select apercentile([c1, 0]) from t1")                          # sql form error 11
+        tdSql.error("select apercentile({c1, 0}) from t1")                          # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")                               # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")              # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")              # args: 4
+        tdSql.error("select apercentile() from t1")                                 # args: null 1
+        tdSql.error("select apercentile from t1")                                   # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")                            # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))             # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))                         # col: tag column
+        tdSql.error(self.apercentile_query_form(col=123))                           # col: numerical
+        tdSql.error(self.apercentile_query_form(col=True))                          # col: bool
+        tdSql.error(self.apercentile_query_form(col=''))                            # col: ''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))                    # col: expr
+        tdSql.error(self.apercentile_query_form(col="t%"))                          # col: non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))                          # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))                          # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))                          # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))                         # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))                            # p: bool
+        tdSql.error(self.apercentile_query_form(p='a'))                             # p: str
+        tdSql.error(self.apercentile_query_form(p='last(*)'))                       # p: expr
+        tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))       # p: timestamp
+        tdSql.error(self.apercentile_query_form(algo='t-digest'))                   # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t_digest"'))                 # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))                # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest."'))                # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))                # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))                # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo='tdigest'))                    # algorithm: str
+        tdSql.error(self.apercentile_query_form(algo=2.0))                          # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=1.9999))                       # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=-0.9999))                      # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=-1.0))                         # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=0b1))                          # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=0x1))                          # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=0o1))                          # algorithm: float
+        tdSql.error(self.apercentile_query_form(algo=True))                         # algorithm: bool
+        tdSql.error(self.apercentile_query_form(algo="True"))                       # algorithm: bool
+        tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000'))    # algorithm: timestamp
+        tdSql.error(self.apercentile_query_form(algo='last(c1)'))                   # algorithm: expr
+
+        # boundary test
+        tdSql.error(self.apercentile_query_form(p=-1))                      # p left out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=-9223372036854775809))    # p left out of bigint
+        tdSql.error(self.apercentile_query_form(p=100.1))                   # p right out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=18446744073709551616))    # p right out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=-1))                   # algorithm left out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=2))                    # algorithm right out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint
+
+        # mix function test
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))       # mix with top function
+        # NOTE(review): comment said "bottom" but the query repeats top(c1,1) — likely intended bottom(c1,1); confirm
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))       # mix with bottom function
+        tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))    # mix with last_row function
+        tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))    # mix with distinct function
+        tdSql.error(self.apercentile_query_form(alias=', *'))               # mix with *
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))        # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"'))    # mix with interp function
+        tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)'))  # mix with derivative function
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))        # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias='+ c1)'))             # mix with four operation
+
+    def apercentile_data(self, tbnum, data_row, basetime):
+        """Insert data_row rows of negative values after basetime and
+        data_row rows of positive values before basetime into each t{i},
+        plus one row per iteration into each tt{i}.
+        :param tbnum: int, number of t{i}/tt{i} sub-tables to fill;
+        :param data_row: int, rows to insert per direction per table;
+        :param basetime: int, epoch millisecond timestamp anchor;
+        """
+        for i in range(tbnum):
+            for j in range(data_row):
+                # Rows after basetime: negative-valued columns.
+                # NOTE(review): random.uniform(200, -1) has reversed bounds
+                # vs. the other calls' (-200, -1) — confirm which is intended
+                # (uniform accepts either order, but the ranges differ).
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                # Rows before basetime: positive-valued columns.
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                # One matching row in the second stable's sub-table for joins.
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
+    def td6108(self):
+        """TD-6108: end-to-end apercentile regression — runs the positive and
+        negative query matrices against an empty schema, populated data,
+        data containing NULL rows, and again after a dnode restart (WAL replay).
+        """
+        tdLog.printNoPrefix("==========TD-6108==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database  if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+                ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+                c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+                ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        tbnum = 10
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        tdLog.printNoPrefix("######## no data test:")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data test:")
+        nowtime = int(round(time.time() * 1000))
+        per_table_rows = 1000
+        self.apercentile_data(tbnum, per_table_rows, nowtime)
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data with NULL test:")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        # Restart the dnode so results are re-checked after WAL recovery.
+        tdLog.printNoPrefix("######## check after WAL test:")
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        self.apercentile_query()
+        self.error_apercentile()
+
+
+    def run(self):
+        # Test entry point. Only TD-4097 is currently enabled; the other
+        # cases are kept disabled below for reference.
+        self.td4097()
+
+        # self.td5168()
+        # self.td5433()
+        # self.td5798()
+
+        # develop branch
+        # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+    def stop(self):
+        # Framework teardown: close the sql helper and report success.
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td4288.py b/tests/pytest/functions/queryTestCases-td4288.py
new file mode 100644
index 0000000000000000000000000000000000000000..855dbd3bd8c6921fd787a137bf228f55765ab5f2
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td4288.py
@@ -0,0 +1,1587 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        # Framework entry: bind the shared tdSql helper to this connection.
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+    def getCfgDir(self) -> str:
+        # Resolve the dnode1 simulator cfg directory under the build root,
+        # accounting for the community-repo vs. standalone layouts.
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+        else:
+            cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+        return cfgDir
+
+    def getCfgFile(self) -> str:
+        # Full path of the dnode1 taos.cfg file.
+        return self.getCfgDir()+"/taos.cfg"
+
+    def td3690(self):
+        """TD-3690: verify the default of the offlineThreshold variable.
+        The row index of 'offlineThreshold' is located in the 'show variables'
+        output with numpy, then its value is checked against 864000.
+        """
+        tdLog.printNoPrefix("==========TD-3690==========")
+
+        tdSql.prepare()
+
+        tdSql.execute("show variables")
+        res_off = tdSql.cursor.fetchall()
+        resList = np.array(res_off)
+        index = np.where(resList == "offlineThreshold")
+        index_value = np.dstack((index[0])).squeeze()
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 864000)
+
+    def td4082(self):
+        """TD-4082: compressMsgSize config handling across dnode restarts.
+        Checks the default (-1), appends an override to taos.cfg via sed and
+        expects it to take effect, then corrupts the value (out of range) and
+        expects fallback to -1, and finally removes the appended line.
+        """
+        tdLog.printNoPrefix("==========TD-4082==========")
+        tdSql.prepare()
+
+        cfgfile = self.getCfgFile()
+        max_compressMsgSize = 100000000
+
+        # Locate the compressMsgSize row index in 'show variables'.
+        tdSql.execute("show variables")
+        res_com = tdSql.cursor.fetchall()
+        rescomlist = np.array(res_com)
+        cpms_index = np.where(rescomlist == "compressMsgSize")
+        index_value = np.dstack((cpms_index[0])).squeeze()
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        # Append the override while the dnode is stopped.
+        # NOTE(review): the sed payload writes 'compressMSgSize' (capital S)
+        # while the variable is 'compressMsgSize' — confirm cfg keys are
+        # matched case-insensitively, otherwise this line is a no-op.
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 100000000)
+
+        # Bump the value past the allowed maximum: expect fallback to -1.
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, -1)
+
+        # Cleanup: drop the appended cfg line and restart.
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+    def td4097(self):
+        """TD-4097: coverage of 'show create database/stable/table'.
+        Exercised in three phases: without entering a database, after
+        'use db', and again after dropping tables/stables/databases; each
+        valid statement must return one row, every malformed or dangling
+        reference must error.
+        """
+        tdLog.printNoPrefix("==========TD-4097==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("drop database if exists db1")
+        tdSql.execute("create database  if not exists db keep 3650")
+        tdSql.execute("create database  if not exists db1 keep 3650")
+        tdSql.execute("create database  if not exists new keep 3650")
+        tdSql.execute("create database  if not exists private keep 3650")
+        tdSql.execute("create database  if not exists db2 keep 3650")
+
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+        tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+        tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+        tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+        tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+        tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+        # tdLog.printNoPrefix("==========TD-4097==========")
+        # insert data, then run the show create statements
+
+        # p1: without entering (use-ing) the target database
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db1")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db2")
+        tdSql.checkRows(1)
+        tdSql.query("show create database new")
+        tdSql.checkRows(1)
+        tdSql.query("show create database private")
+        tdSql.checkRows(1)
+        tdSql.error("show create database ")
+        tdSql.error("show create databases db ")
+        tdSql.error("show create database db.stb1")
+        tdSql.error("show create database db0")
+        tdSql.error("show create database db db1")
+        tdSql.error("show create database db, db1")
+        tdSql.error("show create database stb1")
+        tdSql.error("show create database * ")
+
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+        tdSql.error("show create stable db.t10")
+        tdSql.error("show create stable db.stb0")
+        tdSql.error("show create stable stb1")
+        tdSql.error("show create stable ")
+        tdSql.error("show create stable *")
+        tdSql.error("show create stable db.stb1 db.stb2")
+        tdSql.error("show create stable db.stb1, db.stb2")
+
+        tdSql.query("show create table db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db.t10")
+        tdSql.checkRows(1)
+        tdSql.error("show create table db.stb0")
+        tdSql.error("show create table stb1")
+        tdSql.error("show create table ")
+        tdSql.error("show create table *")
+        tdSql.error("show create table db.stb1 db.stb2")
+        tdSql.error("show create table db.stb1, db.stb2")
+
+        # p2: after entering the target database
+        tdSql.execute("use db")
+
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create database db1")
+        tdSql.checkRows(1)
+        tdSql.error("show create database ")
+        tdSql.error("show create databases db ")
+        tdSql.error("show create database db.stb1")
+        tdSql.error("show create database db0")
+        tdSql.error("show create database db db1")
+        tdSql.error("show create database db, db1")
+        tdSql.error("show create database stb1")
+        tdSql.error("show create database * ")
+
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db1.stb3")
+        tdSql.checkRows(1)
+        tdSql.error("show create stable db.t10")
+        tdSql.error("show create stable db")
+        tdSql.error("show create stable t10")
+        tdSql.error("show create stable db.stb0")
+        tdSql.error("show create stables stb1")
+        tdSql.error("show create stable ")
+        tdSql.error("show create stable *")
+        tdSql.error("show create stable db.stb1 db.stb2")
+        tdSql.error("show create stable stb1 stb2")
+        tdSql.error("show create stable db.stb1, db.stb2")
+        tdSql.error("show create stable stb1, stb2")
+
+        tdSql.query("show create table db.stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db.t10")
+        tdSql.checkRows(1)
+        tdSql.query("show create table t10")
+        tdSql.checkRows(1)
+        tdSql.query("show create table db1.t30")
+        tdSql.checkRows(1)
+        tdSql.error("show create table t30")
+        tdSql.error("show create table db.stb0")
+        tdSql.error("show create table db.t0")
+        tdSql.error("show create table db")
+        tdSql.error("show create tables stb1")
+        tdSql.error("show create tables t10")
+        tdSql.error("show create table ")
+        tdSql.error("show create table *")
+        tdSql.error("show create table db.stb1 db.stb2")
+        tdSql.error("show create table db.t11 db.t10")
+        tdSql.error("show create table db.stb1, db.stb2")
+        tdSql.error("show create table db.t11, db.t10")
+        tdSql.error("show create table stb1 stb2")
+        tdSql.error("show create table t11 t10")
+        tdSql.error("show create table stb1, stb2")
+        tdSql.error("show create table t11, t10")
+
+        # p3: query again after dropping tables/databases
+        tdSql.execute("drop table if exists t11")
+
+        tdSql.error("show create table t11")
+        tdSql.error("show create table db.t11")
+        tdSql.query("show create stable stb1")
+        tdSql.checkRows(1)
+        tdSql.query("show create table t10")
+        tdSql.checkRows(1)
+
+        tdSql.execute("drop stable if exists stb2")
+
+        tdSql.error("show create table stb2")
+        tdSql.error("show create table db.stb2")
+        tdSql.error("show create stable stb2")
+        tdSql.error("show create stable db.stb2")
+        tdSql.error("show create stable db.t20")
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+
+        tdSql.execute("drop database if exists db1")
+        tdSql.error("show create database db1")
+        tdSql.error("show create stable db1.t31")
+        tdSql.error("show create stable db1.stb3")
+        tdSql.query("show create database db")
+        tdSql.checkRows(1)
+        tdSql.query("show create stable db.stb1")
+        tdSql.checkRows(1)
+
+        # Cleanup: drop everything created by this case.
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("drop database if exists db1")
+        tdSql.execute("drop database if exists new")
+        tdSql.execute("drop database if exists db2")
+        tdSql.execute("drop database if exists private")
+
+    def td4153(self):
+        # TD-4153: placeholder — logs the banner only; no checks implemented yet.
+        tdLog.printNoPrefix("==========TD-4153==========")
+
+        pass
+
+    def td4288(self):
+        """TD-4288: validate the database 'keep' option.
+        Valid range is [days, 365000]; checks the default (3650), rejects
+        out-of-range and malformed alter statements, applies a valid alter,
+        and confirms a freshly created database reverts to the default.
+        The community build reports keep as a 'x,y,z' triple, the enterprise
+        build as a single integer, hence the two checkData branches.
+        """
+        tdLog.printNoPrefix("==========TD-4288==========")
+        # keep ~ [days,365000]
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db")
+
+        # Locate the 'keep' row index in 'show variables'.
+        tdSql.execute("show variables")
+        res_kp = tdSql.cursor.fetchall()
+        resList = np.array(res_kp)
+        keep_index = np.where(resList == "keep")
+        index_value = np.dstack((keep_index[0])).squeeze()
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 3650)
+
+        tdSql.query("show databases")
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "3650,3650,3650")
+        else:
+            tdSql.checkData(0, 7, 3650)
+
+        days = tdSql.getData(0, 6)
+        tdSql.error("alter database db keep 3650001")
+        tdSql.error("alter database db keep 9")
+        tdSql.error("alter database db keep 0b")
+        tdSql.error("alter database db keep 3650,9,36500")
+        tdSql.error("alter database db keep 3650,3650,365001")
+        tdSql.error("alter database db keep 36500,a,36500")
+        tdSql.error("alter database db keep (36500,3650,3650)")
+        tdSql.error("alter database db keep [36500,3650,36500]")
+        tdSql.error("alter database db keep 36500,0xff,3650")
+        tdSql.error("alter database db keep 36500,0o365,3650")
+        tdSql.error("alter database db keep 36500,0A3Ch,3650")
+        tdSql.error("alter database db keep")
+        tdSql.error("alter database db keep0 36500")
+
+        tdSql.execute("alter database db keep 36500")
+        tdSql.query("show databases")
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "36500,36500,36500")
+        else:
+            tdSql.checkData(0, 7, 36500)
+
+        # A new database must come back with the default keep of 3650.
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database  if not exists db1")
+        tdSql.query("show databases")
+        if ("community" in selfPath):
+            tdSql.checkData(0, 7, "3650,3650,3650")
+        else:
+            tdSql.checkData(0, 7, 3650)
+
+        tdSql.query("show variables")
+        tdSql.checkData(index_value, 1, 3650)
+
+        tdSql.execute("alter database db1 keep 365")
+        tdSql.execute("drop database if exists db1")
+
+
+        pass
+
+    def td4724(self):
+        """TD-4724: vnode table-distribution limits.
+        Appends maxVgroupsPerDb / minTablesPerVnode / maxTablesPerVnode to
+        taos.cfg, creates 100 tables, and expects them to spread over more
+        than one vnode; the appended cfg lines are removed afterwards.
+        """
+        tdLog.printNoPrefix("==========TD-4724==========")
+        cfgfile = self.getCfgFile()
+        minTablesPerVnode = 5
+        maxTablesPerVnode = 10
+        maxVgroupsPerDb = 100
+
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+
+        # Append the three overrides while the dnode is stopped.
+        tdDnodes.stop(index)
+        vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+        min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+        max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+        try:
+            _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database  if not exists  db keep 3650")
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+        insert_sql = "insert into "
+        for i in range(100):
+            tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+            insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+        # With maxTablesPerVnode=10, 100 tables must occupy >= 2 vnodes.
+        tdSql.query("show dnodes")
+        vnode_count = tdSql.getData(0, 2)
+        if vnode_count <= 1:
+            tdLog.exit("vnode is less than 2")
+
+        tdSql.execute(insert_sql)
+        # Cleanup: remove the three appended cfg lines ('$d' once per line).
+        tdDnodes.stop(index)
+        cmd = f"sed -i '$d' {cfgfile}"
+        try:
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+            _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+        except Exception as e:
+            raise e
+
+        tdDnodes.start(index)
+
+        pass
+
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compacting not occured")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # insert random values within a narrow range
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # fetch data at five timestamps as baseline values; with compression off they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # stop the service and record the data size with lossy compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # enable lossy compression with lossyColumns=float, then start the service and insert data
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five timestamps above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and record the data size with lossyColumns=float
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to lossyColumns=double and start the service
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five timestamps above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and record the data size with lossyColumns=double
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to lossyColumns=float|double and start the service
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five timestamps above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and record the data size with lossyColumns=float|double
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ numtab=20000
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+ #========== TD-5810 support distinct multi-data-column ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+ #========== TD-5798 support distinct multi-tags-column ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+ def td5935(self):
+ tdLog.printNoPrefix("==========TD-5935==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+ nowtime=int(round((time.time()*1000)))
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+ tdSql.execute(sql)
+ for j in range(1000):
+ tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+ tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+ ########### TD-5933 verify the bug of "function stddev with interval returns 0 rows" is fixed ##########
+ stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+ tdSql.query(stddevAndIntervalSql)
+ tdSql.checkRows(10)
+
+ ########## TD-5978 verify the bug of "when the start row is null, the result of fill(next) is 0" is fixed ##########
+ fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+ tdSql.query(fillsql)
+ fillResult=False
+ if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+ fillResult=True
+ if fillResult:
+ tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+ else:
+ tdLog.exit("fill(next) is wrong")
+
+ pass
+
+ def td6068(self):
+ tdLog.printNoPrefix("==========TD-6068==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+ tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+ tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+ tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+ tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+ tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+ pass
+
+ def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+ '''
+ Build and return an apercentile SQL statement from its parts.
+ apercentile function:
+ :param col: string, column name, required parameters;
+ :param p: float, percentile interval, [0,100], required parameters;
+ :param com: string, separator placed between p and algo (',' normally, '' to omit the algo argument);
+ :param algo: string, algorithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]};
+ :param alias: string, result column another name;
+ :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
+ :param condition: expression;
+ :param args: other functions,like: ', last(col)'
+ :return: apercentile query statement,default: select apercentile(c1, 0, 1) from t1
+ '''
+
+ return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+ def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+
+ # Validate apercentile() against an exact baseline (percentile() per child
+ # table, or np.percentile over the raw column) within a deviation
+ # tolerance scaled by the column's spread.
+ tdSql.query(f"select count({col}) from {table_expr} {condition}")
+ if tdSql.queryRows == 0:
+ # empty source: apercentile must also return zero rows
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ tdSql.checkRows(0)
+ return
+
+ # always exercise boundary/mid percentiles in addition to the caller's p
+ pset = [0, 40, 60, 100]
+ if p not in pset:
+ pset.append(p)
+
+ # spread_num scales the absolute-deviation tolerance used below
+ if "stb" in table_expr:
+ tdSql.query(f"select spread({col}) from stb1")
+ else:
+ tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+ spread_num = tdSql.getData(0, 0)
+
+ for pi in pset:
+
+ if "group" in condition:
+ # group-by path: compare each group's apercentile row against the
+ # exact percentile() computed on that group's child table
+ tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+ query_result = tdSql.queryResult
+ query_rows = tdSql.queryRows
+ for i in range(query_rows):
+ # rewrite the condition so it is valid against a single child table
+ pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+ tbname = query_result[i][-1]
+ tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+ print(tdSql.sql)
+ pre_data = tdSql.getData(0, 0)
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+ ))
+ # large results: relative-deviation check; near-zero results:
+ # absolute check scaled by 2% of the spread
+ if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+ tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+ else:
+ devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+ if devia < 0.5:
+ tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+ f"actual deviation:{devia} <= expect deviation: 0.01")
+ else:
+ tdLog.exit(
+ f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+ f"actual deviation:{devia} > expect deviation: 0.01")
+
+ # if "group" in condition:
+ # tdSql.query(self.apercentile_query_form(
+ # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+ # ))
+ # query_result = tdSql.queryResult
+ # query_rows = tdSql.queryRows
+ # tdSql.query(self.apercentile_query_form(
+ # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+ # ))
+ # for i in range(query_rows):
+ # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+ # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+ # else:
+ # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+ # if devia < 0.5:
+ # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+ # f"actual deviation:{devia} <= expect deviation: 0.01")
+ # else:
+ # tdLog.exit(
+ # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+ # f"actual deviation:{devia} > expect deviation: 0.01")
+
+ else:
+ # non-group path: pick a baseline query, then compare apercentile
+ # against np.percentile over the fetched values
+ if ',' in alias or not alias:
+ tdSql.query(f"select {col} from {table_expr} {condition}")
+ elif "stb" not in table_expr:
+ tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+ else:
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+ ))
+ # drop NULLs before computing the numpy baseline
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+ ))
+
+ if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+ else:
+ devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+ if devia < 0.5:
+ tdLog.info(
+ f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+ f"actual deviation:{devia} <= expect deviation: 0.01")
+ else:
+ tdLog.exit(
+ f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+ f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+ def apercentile_query(self):
+
+ # Positive-path apercentile coverage: each caseN builds kwargs for
+ # checkapert(), which verifies the result against an exact baseline.
+
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+ # case1: int col
+ self.checkapert()
+ # case2: float col
+ case2 = {'col':'c2'}
+ self.checkapert(**case2)
+ # case3: double col
+ case3 = {'col':'c5'}
+ self.checkapert(**case3)
+ # case4: bigint col
+ case4 = {'col':'c7'}
+ self.checkapert(**case4)
+ # case5: smallint col
+ case5 = {'col':'c8'}
+ self.checkapert(**case5)
+ # case6: tinyint col
+ case6 = {'col':'c9'}
+ self.checkapert(**case6)
+ # case7: stable
+ case7 = {'table_expr':'stb1'}
+ self.checkapert(**case7)
+ # case8: nest query, outquery
+ case8 = {'table_expr':'(select c1 from t1)'}
+ self.checkapert(**case8)
+ # case9: nest query, inquery and out query
+ case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+ self.checkapert(**case9)
+
+ # case10: nest query, inquery
+ tdSql.query("select * from (select c1 from stb1)")
+ if tdSql.queryRows == 0:
+ tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+ tdSql.checkRows(0)
+ else:
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+ tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+ tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+ # case11: no algorithm = algo:0
+ case11 = {'com':'', 'algo': ''}
+ self.checkapert(**case11)
+
+ # case12~14: p given as bin/hex literal; algorithm name case-insensitivity
+ case12 = {'p': 0b1100100}
+ self.checkapert(**case12)
+ case13 = {'algo':'"T-DIGEST"'}
+ self.checkapert(**case13)
+ case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+ self.checkapert(**case14)
+
+ # case15~21: mix with aggregate function
+ case15 = {'alias':', count(*)'}
+ self.checkapert(**case15)
+ case16 = {'alias':', avg(c1)'}
+ self.checkapert(**case16)
+ case17 = {'alias':', twa(c1)'}
+ self.checkapert(**case17)
+ case18 = {'alias':', irate(c1)'}
+ self.checkapert(**case18)
+ case19 = {'alias':', sum(c1)'}
+ self.checkapert(**case19)
+ case20 = {'alias':', stddev(c1)'}
+ self.checkapert(**case20)
+ case21 = {'alias':', leastsquares(c1, 1, 1)'}
+ self.checkapert(**case21)
+
+ # case22~27:mix with selector function
+ case22 = {'alias':', min(c1)'}
+ self.checkapert(**case22)
+ case23 = {'alias':', max(c1)'}
+ self.checkapert(**case23)
+ case24 = {'alias':', first(c1)'}
+ self.checkapert(**case24)
+ case25 = {'alias':', last(c1)'}
+ self.checkapert(**case25)
+ case26 = {'alias':', percentile(c1, 0)'}
+ self.checkapert(**case26)
+ case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+ self.checkapert(**case27)
+
+ # case28~29: mix with computing function
+ case28 = {'alias':', spread(c1)'}
+ self.checkapert(**case28)
+ # case29: mix with four operation
+ case29 = {'alias':'+ spread(c1)'}
+ self.checkapert(**case29)
+
+ # case30~36: with condition
+ case30 = {'condition':'where ts > now'}
+ self.checkapert(**case30)
+ case31 = {'condition':'where c1 between 1 and 200'}
+ self.checkapert(**case31)
+ case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+ self.checkapert(**case32)
+ case33 = {'condition':'where c1>100 and c2<100'}
+ self.checkapert(**case33)
+ case34 = {'condition':'where c1 is not null'}
+ self.checkapert(**case34)
+ case35 = {'condition':'where c4 like "_inary%"'}
+ self.checkapert(**case35)
+ case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+ self.checkapert(**case36)
+
+ # case37~38: with join
+ case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+ self.checkapert(**case37)
+ case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+ self.checkapert(**case38)
+
+ # case39: with group by
+ case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+ self.checkapert(**case39)
+
+ # case40: with slimit
+ case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+ self.checkapert(**case40)
+
+ # case41: with soffset
+ case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+ self.checkapert(**case41)
+
+ # case42~43: with order by
+ case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+ self.checkapert(**case42)
+ case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+ self.checkapert(**case43)
+
+ # case44~45: with limit offset
+ case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+ self.checkapert(**case44)
+ case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+ self.checkapert(**case45)
+
+ pass
+
+ def error_apercentile(self):
+
+ # Negative-path coverage: every statement here must be rejected by the
+ # server (tdSql.error asserts the query fails).
+ #
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+ #
+ # form test
+ tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm
+ tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm
+ tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm
+ tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm
+ tdSql.error("apercentile( c1, 100) from t1") # no select
+ tdSql.error("select apercentile from t1") # no algorithm condition
+ tdSql.error("select apercentile c1,0 from t1") # no brackets
+ tdSql.error("select apercentile (c1,0) t1") # no from
+ tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm
+ tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr
+ tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1
+ tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2
+ tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3
+ tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5
+ tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6
+ tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7
+ tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8
+ tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9
+ tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10
+ tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11
+ tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12
+ tdSql.error("select apercentile(c1) from t1") # args: 1
+ tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # args: 4
+ tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # args: 4
+ tdSql.error("select apercentile() from t1") # args: null 1
+ tdSql.error("select apercentile from t1") # args: null 2
+ tdSql.error("select apercentile( , , ) from t1") # args: null 3
+ tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # args: null 4
+ tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column
+ tdSql.error(self.apercentile_query_form(col=123)) # col:numerical
+ tdSql.error(self.apercentile_query_form(col=True)) # col:bool
+ tdSql.error(self.apercentile_query_form(col='')) # col:''
+ tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr
+ tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical
+ tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp
+ tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary
+ tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool
+ tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar
+ tdSql.error(self.apercentile_query_form(p=True)) # p:bool
+ tdSql.error(self.apercentile_query_form(p='a')) # p:str
+ tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr
+ tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp
+ tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:unquoted str
+ tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:unquoted str
+ tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm:int (bin literal)
+ tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm:int (hex literal)
+ tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm:int (oct literal)
+ tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool
+ tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:bool-like str
+ tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp
+ tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr
+
+ # boundary test
+ tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint
+ tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint
+
+ # mix function test
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with bottom function -- NOTE(review): query duplicates the top(c1,1) line above; likely meant bottom(c1,1)
+ tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function
+ tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function
+ tdSql.error(self.apercentile_query_form(alias=', *')) # mix with *
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function
+ tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function (duplicate check)
+ tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation
+
+ def apercentile_data(self, tbnum, data_row, basetime):
+ for i in range(tbnum):
+ for j in range(data_row):
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+ f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+ f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+ )
+
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+ f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+ f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+ )
+ tdSql.execute(
+ f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ )
+
+ pass
+
+ def td6108(self):
+ # TD-6108 apercentile regression: build schema, then run the positive
+ # (apercentile_query) and negative (error_apercentile) suites across
+ # four phases: no data, inserted data, NULL rows, and after a dnode
+ # restart (WAL replay).
+ tdLog.printNoPrefix("==========TD-6108==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create stable db.stb1 (\
+ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+ ) \
+ tags(st1 int)"
+ )
+ tdSql.execute(
+ "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ )
+ tbnum = 10
+ for i in range(tbnum):
+ tdSql.execute(f"create table t{i} using stb1 tags({i})")
+ tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+ tdLog.printNoPrefix("######## no data test:")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data test:")
+ nowtime = int(round(time.time() * 1000))
+ per_table_rows = 1000
+ self.apercentile_data(tbnum, per_table_rows, nowtime)
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data with NULL test:")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## check after WAL test:")
+ # restart the first dnode so queries re-read data recovered from WAL
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+
+ self.apercentile_query()
+ self.error_apercentile()
+
+
+ def run(self):
+
+ # Only td4288 is active in this case file; the other scenarios are
+ # intentionally disabled below.
+ self.td4288()
+
+ # self.td5168()
+ # self.td5433()
+ # self.td5798()
+
+ # develop branch
+ # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+ def stop(self):
+ # close the SQL connection and report success to the framework
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td4724.py b/tests/pytest/functions/queryTestCases-td4724.py
new file mode 100644
index 0000000000000000000000000000000000000000..be3aa4be9b7811569148b6e1c3f708427e132567
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td4724.py
@@ -0,0 +1,1587 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ # framework entry point: bind a cursor and the SQL-logging flag
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ # Return the simulator dnode1 config directory; the layout differs
+ # between community and enterprise checkouts.
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ # full path of the dnode1 taos.cfg used by the sed-based config edits
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ # TD-3690: verify the default offlineThreshold variable is 864000.
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ # fetch "show variables" once to find the row index of the
+ # offlineThreshold entry, since its position is not fixed
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ # TD-4082: compressMsgSize config handling — defaults to -1, accepts a
+ # valid override after restart, and falls back to -1 when the value in
+ # taos.cfg is out of range.
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ # locate the compressMsgSize row index in "show variables" output
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ # append the setting to taos.cfg while the dnode is stopped
+ # NOTE(review): "compressMSgSize" has odd capitalization — presumably
+ # taosd parses config keys case-insensitively; confirm against taosd.
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ # bump the value past the allowed maximum: taosd should ignore it
+ # and report -1 again
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ # remove the appended line to restore the original config
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ # TD-4097: "show create database/stable/table" coverage — valid forms
+ # return one row, malformed forms error, and dropped objects stop
+ # resolving while the remaining ones still work.
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+ # insert data, then exercise the "show create" statements
+
+ # p1: without entering any database (no "use db")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2: after entering the database (use db) — unqualified names resolve
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3: query again after dropping tables/stables/databases
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ # clean up every database created by this case
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ # TD-4153: placeholder — scenario not implemented yet
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ # TD-4288: "keep" parameter validation — default is 3650, valid alters
+ # take effect, and malformed/out-of-range alters are rejected.
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ # locate the "keep" row index in "show variables" output
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ # community builds report keep as "k0,k1,k2"; enterprise as a single int
+ tdSql.query("show databases")
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ # NOTE(review): days is fetched but never used below
+ days = tdSql.getData(0, 6)
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ # a valid alter must be reflected in "show databases"
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ # a fresh database must come back with the default keep again
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+                tdLog.exit("compacting not occurred")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+        # insert random numbers drawn from a small range
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+        # take the data at five timestamps as baseline values; with compression off they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+        # stop the service and measure the data size with lossy compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+        # enable lossy compression with parameter float, then start the service and insert data
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+        # query the 5 timestamps mentioned above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and measure the data size with lossyColumns set to float
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+        # change the lossy-compression parameter to double, then start the service
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+        # query the 5 timestamps mentioned above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and measure the data size with lossyColumns set to double
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+        # change the lossy-compression parameter to float&&double, then start the service
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+        # query the 5 timestamps mentioned above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+        # stop the service and measure the data size with lossyColumns set to float&&double
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ numtab=20000
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+        #========== TD-5810 support distinct multi-data-column ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+        #========== TD-5798 support distinct multi-tags-column ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+ def td5935(self):
+ tdLog.printNoPrefix("==========TD-5935==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+ nowtime=int(round((time.time()*1000)))
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+ tdSql.execute(sql)
+ for j in range(1000):
+ tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+ tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+        ########### TD-5933 verify the bug of "function stddev with interval returns 0 rows" is fixed ##########
+ stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+ tdSql.query(stddevAndIntervalSql)
+ tdSql.checkRows(10)
+
+ ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
+ fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+ tdSql.query(fillsql)
+ fillResult=False
+ if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+ fillResult=True
+ if fillResult:
+ tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+ else:
+ tdLog.exit("fill(next) is wrong")
+
+ pass
+
+ def td6068(self):
+ tdLog.printNoPrefix("==========TD-6068==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+ tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+ tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+ tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+ tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+ tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+ tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+ tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+ tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.checkCols(4)
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.query("select ts as t, diff(c1) from t1")
+ tdSql.error("select ts as t, diff(c1) from stb1")
+ tdSql.query("select ts as t, diff(c2) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c3) from t1")
+ tdSql.error("select ts as t, diff(c4) from t1")
+ tdSql.query("select ts as t, diff(c5) from t1")
+ tdSql.checkRows(5)
+ tdSql.error("select ts as t, diff(c6) from t1")
+ tdSql.error("select ts as t, diff(t1) from t1")
+ tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+ tdSql.error("select ts as t, bottom(c1, 0) from t1")
+ tdSql.query("select ts as t, bottom(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, bottom(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, bottom(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c3, 5) from t1")
+ tdSql.error("select ts as t, bottom(c4, 5) from t1")
+ tdSql.query("select ts as t, bottom(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, bottom(c6, 5) from t1")
+ tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, bottom(t1, 1) from t1")
+ tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+ tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+ tdSql.error("select ts as t, top(c1, 0) from t1")
+ tdSql.query("select ts as t, top(c1, 5) from t1")
+ tdSql.checkRows(5)
+ tdSql.checkCols(3)
+ for i in range(5):
+ data=tdSql.getData(i, 0)
+ tdSql.checkData(i, 1, data)
+ tdSql.query("select ts as t, top(c1, 5) from stb1")
+ tdSql.checkRows(5)
+ tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+ tdSql.checkRows(500)
+ tdSql.query("select ts as t, top(c1, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.query("select ts as t, top(c2, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c3, 5) from t1")
+ tdSql.error("select ts as t, top(c4, 5) from t1")
+ tdSql.query("select ts as t, top(c5, 8) from t1")
+ tdSql.checkRows(6)
+ tdSql.error("select ts as t, top(c6, 5) from t1")
+ tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+ tdSql.error("select ts as t, top(t1, 1) from t1")
+ tdSql.error("select ts as t, top(t1, 1) from stb1")
+ tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+ tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+ pass
+
+ def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+ '''
+ apercentile function:
+ :param col: string, column name, required parameters;
+ :param p: float, percentile interval, [0,100], required parameters;
+ :param algo: string, algorithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]};
+ :param alias: string, result column another name;
+ :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
+ :param condition: expression;
+ :param args: other functions, like: ', last(col)'
+ :return: apercentile query statement, default: select apercentile(c1, 0, 't-digest') from t1
+ '''
+
+ return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+ def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+
+ tdSql.query(f"select count({col}) from {table_expr} {condition}")
+ if tdSql.queryRows == 0:
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+ ))
+ tdSql.checkRows(0)
+ return
+
+ pset = [0, 40, 60, 100]
+ if p not in pset:
+ pset.append(p)
+
+ if "stb" in table_expr:
+ tdSql.query(f"select spread({col}) from stb1")
+ else:
+ tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+ spread_num = tdSql.getData(0, 0)
+
+ for pi in pset:
+
+ if "group" in condition:
+ tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+ query_result = tdSql.queryResult
+ query_rows = tdSql.queryRows
+ for i in range(query_rows):
+ pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+ tbname = query_result[i][-1]
+ tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+ print(tdSql.sql)
+ pre_data = tdSql.getData(0, 0)
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+ ))
+ if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+ tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+ else:
+ devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+ if devia < 0.5:
+ tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+ f"actual deviation:{devia} <= expect deviation: 0.01")
+ else:
+ tdLog.exit(
+ f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+ f"actual deviation:{devia} > expect deviation: 0.01")
+
+ # if "group" in condition:
+ # tdSql.query(self.apercentile_query_form(
+ # col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+ # ))
+ # query_result = tdSql.queryResult
+ # query_rows = tdSql.queryRows
+ # tdSql.query(self.apercentile_query_form(
+ # col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+ # ))
+ # for i in range(query_rows):
+ # if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+ # tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+ # else:
+ # devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+ # if devia < 0.5:
+ # tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+ # f"actual deviation:{devia} <= expect deviation: 0.01")
+ # else:
+ # tdLog.exit(
+ # f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ # f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+ # f"actual deviation:{devia} > expect deviation: 0.01")
+
+ else:
+ if ',' in alias or not alias:
+ tdSql.query(f"select {col} from {table_expr} {condition}")
+ elif "stb" not in table_expr:
+ tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+ else:
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+ ))
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query(self.apercentile_query_form(
+ col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+ ))
+
+ if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+ else:
+ devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+ if devia < 0.5:
+ tdLog.info(
+ f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+ f"actual deviation:{devia} <= expect deviation: 0.01")
+ else:
+ tdLog.exit(
+ f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+ f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+ f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+ def apercentile_query(self):
+
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+ # case1: int col
+ self.checkapert()
+ # case2: float col
+ case2 = {'col':'c2'}
+ self.checkapert(**case2)
+ # case3: double col
+ case3 = {'col':'c5'}
+ self.checkapert(**case3)
+ # case4: bigint col
+ case4 = {'col':'c7'}
+ self.checkapert(**case4)
+ # case5: smallint col
+ case5 = {'col':'c8'}
+ self.checkapert(**case5)
+ # case6: tinyint col
+ case6 = {'col':'c9'}
+ self.checkapert(**case6)
+ # case7: stable
+ case7 = {'table_expr':'stb1'}
+ self.checkapert(**case7)
+ # case8: nest query, outquery
+ case8 = {'table_expr':'(select c1 from t1)'}
+ self.checkapert(**case8)
+ # case9: nest query, inquery and out query
+ case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+ self.checkapert(**case9)
+
+ # case10: nest query, inquery
+ tdSql.query("select * from (select c1 from stb1)")
+ if tdSql.queryRows == 0:
+ tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+ tdSql.checkRows(0)
+ else:
+ query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+ tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+ tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+ tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+ tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+ # case11: no algorithm = algo:0
+ case11 = {'com':'', 'algo': ''}
+ self.checkapert(**case11)
+
+ # case12~14: p: bin/oct/hex
+ case12 = {'p': 0b1100100}
+ self.checkapert(**case12)
+ case13 = {'algo':'"T-DIGEST"'}
+ self.checkapert(**case13)
+ case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+ self.checkapert(**case14)
+
+ # case15~21: mix with aggregate function
+ case15 = {'alias':', count(*)'}
+ self.checkapert(**case15)
+ case16 = {'alias':', avg(c1)'}
+ self.checkapert(**case16)
+ case17 = {'alias':', twa(c1)'}
+ self.checkapert(**case17)
+ case18 = {'alias':', irate(c1)'}
+ self.checkapert(**case18)
+ case19 = {'alias':', sum(c1)'}
+ self.checkapert(**case19)
+ case20 = {'alias':', stddev(c1)'}
+ self.checkapert(**case20)
+ case21 = {'alias':', leastsquares(c1, 1, 1)'}
+ self.checkapert(**case21)
+
+ # case22~27:mix with selector function
+ case22 = {'alias':', min(c1)'}
+ self.checkapert(**case22)
+ case23 = {'alias':', max(c1)'}
+ self.checkapert(**case23)
+ case24 = {'alias':', first(c1)'}
+ self.checkapert(**case24)
+ case25 = {'alias':', last(c1)'}
+ self.checkapert(**case25)
+ case26 = {'alias':', percentile(c1, 0)'}
+ self.checkapert(**case26)
+ case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+ self.checkapert(**case27)
+
+ # case28~29: mix with computing function
+ case28 = {'alias':', spread(c1)'}
+ self.checkapert(**case28)
+ # case29: mix with four operation
+ case29 = {'alias':'+ spread(c1)'}
+ self.checkapert(**case29)
+
+ # case30~36: with condition
+ case30 = {'condition':'where ts > now'}
+ self.checkapert(**case30)
+ case31 = {'condition':'where c1 between 1 and 200'}
+ self.checkapert(**case31)
+ case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+ self.checkapert(**case32)
+ case33 = {'condition':'where c1>100 and c2<100'}
+ self.checkapert(**case33)
+ case34 = {'condition':'where c1 is not null'}
+ self.checkapert(**case34)
+ case35 = {'condition':'where c4 like "_inary%"'}
+ self.checkapert(**case35)
+ case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+ self.checkapert(**case36)
+
+ # case37~38: with join
+ case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+ self.checkapert(**case37)
+ case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+ self.checkapert(**case38)
+
+ # case39: with group by
+ case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+ self.checkapert(**case39)
+
+ # case40: with slimit
+ case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+ self.checkapert(**case40)
+
+ # case41: with soffset
+ case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+ self.checkapert(**case41)
+
+ # case42: with order by
+ case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+ self.checkapert(**case42)
+ case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+ self.checkapert(**case43)
+
+ # case44: with limit offset
+ case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+ self.checkapert(**case44)
+ case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+ self.checkapert(**case45)
+
+ pass
+
+ def error_apercentile(self):
+
+ # unusual test
+ #
+ # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+ # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+ #
+ # form test
+ tdSql.error(self.apercentile_query_form(col="",com='',algo='')) # no col , no algorithm
+ tdSql.error(self.apercentile_query_form(col="")) # no col , algorithm
+ tdSql.error(self.apercentile_query_form(p='',com='',algo='')) # no p , no algorithm
+ tdSql.error(self.apercentile_query_form(p='')) # no p , algorithm
+ tdSql.error("apercentile( c1, 100) from t1") # no select
+ tdSql.error("select apercentile from t1") # no algorithm condition
+ tdSql.error("select apercentile c1,0 from t1") # no brackets
+ tdSql.error("select apercentile (c1,0) t1") # no from
+ tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm
+ tdSql.error("select apercentile( (c1,0) ) from t1") # no table_expr
+ tdSql.error("select apercentile{ (c1,0) } from t1") # sql form error 1
+ tdSql.error("select apercentile[ (c1,0) ] from t1") # sql form error 2
+ tdSql.error("select [apercentile(c1,0) ] from t1") # sql form error 3
+ tdSql.error("select apercentile((c1, 0), 'default') from t1") # sql form error 5
+ tdSql.error("select apercentile(c1, (0, 'default')) from t1") # sql form error 6
+ tdSql.error("select apercentile(c1, (0), 1) from t1") # sql form error 7
+ tdSql.error("select apercentile([c1, 0], 'default') from t1") # sql form error 8
+ tdSql.error("select apercentile(c1, [0, 'default']) from t1") # sql form error 9
+ tdSql.error("select apercentile(c1, {0, 'default'}) from t1") # sql form error 10
+ tdSql.error("select apercentile([c1, 0]) from t1") # sql form error 11
+ tdSql.error("select apercentile({c1, 0}) from t1") # sql form error 12
+ tdSql.error("select apercentile(c1) from t1") # args: 1
+ tdSql.error("select apercentile(c1, 0, 'default', 0) from t1") # args: 4
+ tdSql.error("select apercentile(c1, 0, 0, 'default') from t1") # args: 4
+ tdSql.error("select apercentile() from t1") # args: null 1
+ tdSql.error("select apercentile from t1") # args: null 2
+ tdSql.error("select apercentile( , , ) from t1") # args: null 3
+ tdSql.error(self.apercentile_query_form(col='', p='', algo='')) # args: null 4
+ tdSql.error(self.apercentile_query_form(col="st1")) # col:tag column
+ tdSql.error(self.apercentile_query_form(col=123)) # col:numerical
+ tdSql.error(self.apercentile_query_form(col=True)) # col:bool
+ tdSql.error(self.apercentile_query_form(col='')) # col:''
+ tdSql.error(self.apercentile_query_form(col="last(c1)")) # col:expr
+ tdSql.error(self.apercentile_query_form(col="t%")) # col:non-numerical
+ tdSql.error(self.apercentile_query_form(col="c3")) # col-type: timestamp
+ tdSql.error(self.apercentile_query_form(col="c4")) # col-type: binary
+ tdSql.error(self.apercentile_query_form(col="c6")) # col-type: bool
+ tdSql.error(self.apercentile_query_form(col="c10")) # col-type: nchar
+ tdSql.error(self.apercentile_query_form(p=True)) # p:bool
+ tdSql.error(self.apercentile_query_form(p='a')) # p:str
+ tdSql.error(self.apercentile_query_form(p='last(*)')) # p:expr
+ tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000")) # p:timestamp
+ tdSql.error(self.apercentile_query_form(algo='t-digest')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t_digest"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest0"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest."')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest%"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='"t-digest*"')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo='tdigest')) # algorithm:str
+ tdSql.error(self.apercentile_query_form(algo=2.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=1.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-0.9999)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=-1.0)) # algorithm:float
+ tdSql.error(self.apercentile_query_form(algo=0b1)) # algorithm:int (binary literal)
+ tdSql.error(self.apercentile_query_form(algo=0x1)) # algorithm:int (hex literal)
+ tdSql.error(self.apercentile_query_form(algo=0o1)) # algorithm:int (octal literal)
+ tdSql.error(self.apercentile_query_form(algo=True)) # algorithm:bool
+ tdSql.error(self.apercentile_query_form(algo="True")) # algorithm:str (unquoted identifier, not an SQL string)
+ tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp
+ tdSql.error(self.apercentile_query_form(algo='last(c1)')) # algorithm:expr
+
+ # boundary test
+ tdSql.error(self.apercentile_query_form(p=-1)) # p left out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=-9223372036854775809)) # p left out of bigint
+ tdSql.error(self.apercentile_query_form(p=100.1)) # p right out of [0, 100]
+ tdSql.error(self.apercentile_query_form(p=18446744073709551616)) # p right out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=-1)) # algorithm left out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint
+ tdSql.error(self.apercentile_query_form(algo=2)) # algorithm right out of [0, 1]
+ tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint
+
+ # mix function test
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # mix with top function
+ tdSql.error(self.apercentile_query_form(alias=', top(c1,1)')) # duplicate of previous line; 'bottom(c1,1)' was likely intended — TODO confirm
+ tdSql.error(self.apercentile_query_form(alias=', last_row(c1)')) # mix with last_row function
+ tdSql.error(self.apercentile_query_form(alias=', distinct c1 ')) # mix with distinct function
+ tdSql.error(self.apercentile_query_form(alias=', *')) # mix with *
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function
+ tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function
+ tdSql.error(self.apercentile_query_form(alias=', diff(c1)')) # mix with diff function
+ tdSql.error(self.apercentile_query_form(alias='+ c1)')) # mix with four operation
+
+ def apercentile_data(self, tbnum, data_row, basetime):
+ for i in range(tbnum):
+ for j in range(data_row):
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+ f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+ f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+ )
+
+ tdSql.execute(
+ f"insert into t{i} values ("
+ f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+ f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+ f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+ )
+ tdSql.execute(
+ f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ )
+
+ pass
+
+ def td6108(self):
+ tdLog.printNoPrefix("==========TD-6108==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+
+ tdSql.execute(
+ "create stable db.stb1 (\
+ ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+ c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+ ) \
+ tags(st1 int)"
+ )
+ tdSql.execute(
+ "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ )
+ tbnum = 10
+ for i in range(tbnum):
+ tdSql.execute(f"create table t{i} using stb1 tags({i})")
+ tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+ tdLog.printNoPrefix("######## no data test:")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data test:")
+ nowtime = int(round(time.time() * 1000))
+ per_table_rows = 1000
+ self.apercentile_data(tbnum, per_table_rows, nowtime)
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## insert data with NULL test:")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+ tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+ self.apercentile_query()
+ self.error_apercentile()
+
+ tdLog.printNoPrefix("######## check after WAL test:")
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+ tdDnodes.start(index)
+
+ self.apercentile_query()
+ self.error_apercentile()
+
+
+ def run(self):
+
+ self.td4724()
+
+ # self.td5168()
+ # self.td5433()
+ # self.td5798()
+
+ # develop branch
+ # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td5790.py b/tests/pytest/functions/queryTestCases-td5790.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d7cfe68adc913e53bff098a446350325b0325ab
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td5790.py
@@ -0,0 +1,1588 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+ # insert data, then run "show create" statements
+
+ # p1: without switching into a specific database
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2: after switching into the specified database (use db)
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3: query again after dropping tables/databases
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ """Placeholder for the TD-4153 regression case; currently only logs the marker."""
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ """TD-4288: verify the bounds and syntax checking of the database `keep`
+ option, and that `show variables` / `show databases` report it consistently.
+ Relies on numpy to locate the "keep" row in the `show variables` output."""
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ # execute (not query) so the raw cursor can be read to find the row index of "keep"
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.query("show databases")
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ # community builds render keep as a "d0,d1,d2" string; enterprise as a single int
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ days = tdSql.getData(0, 6)
+ # out-of-range, malformed, and non-numeric keep values must all be rejected
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ # a valid alter must take effect and be visible in show databases
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ # a freshly created database falls back to the default keep of 3650
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ """TD-4724: append vnode/table-count limits to the dnode config with sed,
+ restart the dnode, create 100 child tables plus a batched insert, verify
+ more than one vnode was allocated, then strip the three appended config
+ lines and restart again to restore the original configuration."""
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ # stop the dnode before editing its config file; '$a' appends at end of file
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ # with maxTablesPerVnode=10, 100 tables must spread over multiple vnodes
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ # '$d' deletes the last line; run three times to undo the three appends above
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ """TD-4889: redeploy the dnode with small file-block row limits, load 1000
+ tables x 260 rows, issue `compact vnodes`, and poll `show vgroups` until
+ the compacting flag (column 6) flips from 0, failing after a 3s timeout."""
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ # redeploy applies the cfg dict as the dnode's configuration
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ # 260 rows/table exceeds maxrows 200, forcing multiple file blocks per table
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ # column 6 is the compacting flag: 0 before compaction is triggered
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ # busy-poll until the compacting flag becomes non-zero, or give up after 3s
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compacting not occured")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ """Helper for td5168: recreate db.t1 and insert five deterministic baseline
+ rows at fixed timestamps (2021-07-01 08:00:00..04) followed by 10000 rows
+ of random floats in [1000, 1001] — a narrow range chosen so lossy
+ compression can shrink the data noticeably."""
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ # five baseline rows with deterministic values, used later for exact-match checks
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # bulk rows at now+{i}a (millisecond offsets) with values in a small random band
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ """TD-5168: exercise the lossyColumns (lossy compression) setting. Insert
+ the same dataset with compression off, then with lossyColumns set to
+ float, double, and float|double in turn (edited into the config via sed
+ between restarts). After each run the five baseline rows must still match
+ exactly, and every lossy data size must be smaller than the lossless one."""
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # insert random values confined to a small range
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # capture the five fixed-timestamp rows as baselines; with compression off they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ # stash each column value in locals() as f{col}{row} for later comparisons
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # stop the service and measure the on-disk data size with lossy compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ # each subsequent command first deletes the previous lossyColumns line ('$d')
+ # before appending the next variant ('$a')
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # enable lossy compression for float, start the service and insert the dataset
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five fixed timestamps and compare against the baselines
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=float
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to double and start the service
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five fixed timestamps and compare against the baselines
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=double
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to float&&double and start the service
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five fixed timestamps and compare against the baselines
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=float&&double
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ # every lossy variant must produce a smaller data directory than the lossless run
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ """TD-5433: verify `select distinct <tag>` with =/!= filters behaves the
+ same whether the literal is quoted or not, on both numeric tags (stb1:
+ tinyint/int, 20000 tables, t0 cycling mod 128) and binary tags (stb2:
+ five tables with string tags)."""
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ numtab=20000
+ # t0 repeats every 128 tables (tinyint range); t1 = 100+i is unique per table
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ # int tag t1: quoted and unquoted literals must filter identically
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ # tinyint tag t0: 128 distinct values from the i%128 cycle
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ # binary tag t1 on stb2: same equivalence between quoted and numeric literals
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ # binary tag t0 on stb2
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ """TD-5798 + TD-5810: verify `select distinct` over multiple data columns
+ (TD-5810) and multiple tag columns (TD-5798), including limits/offsets,
+ subqueries, null tags, and a large set of queries that must be rejected
+ (mixing distinct with aggregates, interval, fill, group by, joins, ...)."""
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ # 100 tables per stable; tags cycle mod 7, each table gets 3 value rows plus
+ # one ts-only (null-columns) row
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ # one extra table per stable with all-null tags
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+ #========== TD-5810 suport distinct multi-data-coloumn ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ # same distinct queries against a single child table
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ # invalid distinct usages on data columns that the parser must reject
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ # distinct over subqueries on data columns
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+ #========== TD-5798 suport distinct multi-tags-coloumn ==========
+ # row counts follow from the mod-7 tag cycle plus one all-null tag table
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+ def td5935(self):
+ """TD-5935 (covers TD-5933 and TD-5978): build 100 tables with 1000 rows
+ each at 10ms spacing plus one ts-only (null) leading row, then check that
+ last(*) with interval returns rows (TD-5933) and that fill(next) does not
+ fabricate 0 when the window starts on a null row (TD-5978)."""
+ tdLog.printNoPrefix("==========TD-5935==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+ nowtime=int(round((time.time()*1000)))
+ for i in range(100):
+ sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+ tdSql.execute(sql)
+ for j in range(1000):
+ tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+ # ts-only row at the earliest timestamp: its columns are null
+ tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+ ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
+ stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+ tdSql.query(stddevAndIntervalSql)
+ tdSql.checkRows(10)
+
+ ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
+ fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+ tdSql.query(fillsql)
+ fillResult=False
+ # the filled first window must carry the next real value, not 0 or None
+ if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+ fillResult=True
+ if fillResult:
+ tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+ else:
+ tdLog.exit("fill(next) is wrong")
+
+ pass
+
+    def td6068(self):
+        # TD-6068: verify "ts as t" timestamp aliasing works alongside
+        # derivative/diff/top/bottom, and that all results survive a dnode
+        # restart (i.e. are correctly rebuilt from the WAL).
+        tdLog.printNoPrefix("==========TD-6068==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+        # 100 child tables, each with 6 hourly data rows (now-10h .. now-5h)
+        # plus 3 timestamp-only (all-NULL) rows at now-4h, now-11h, now-450m.
+        for i in range(100):
+            sql = f"create table db.t{i} using db.stb1 tags({i})"
+            tdSql.execute(sql)
+            tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+        # --- derivative: aliased ts must appear as column 0 and equal the row ts.
+        tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        # 100 tables x 5 derivative rows each = 500; extra tbname column -> 4 cols.
+        tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+        tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+        # derivative on a super table without group by must fail.
+        tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+        tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+        tdSql.checkRows(5)
+        # Non-numeric columns (timestamp/binary/bool) and tag columns are rejected.
+        tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+        tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+        tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+        tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+        # --- diff: same aliasing and type rules as derivative.
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.error("select ts as t, diff(c1) from stb1")
+        tdSql.query("select ts as t, diff(c2) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c3) from t1")
+        tdSql.error("select ts as t, diff(c4) from t1")
+        tdSql.query("select ts as t, diff(c5) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c6) from t1")
+        tdSql.error("select ts as t, diff(t1) from t1")
+        tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+        # --- bottom: k must be in [1, 100]; k=8 on 6 data rows returns 6.
+        tdSql.error("select ts as t, bottom(c1, 0) from t1")
+        tdSql.query("select ts as t, bottom(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, bottom(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, bottom(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c3, 5) from t1")
+        tdSql.error("select ts as t, bottom(c4, 5) from t1")
+        tdSql.query("select ts as t, bottom(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c6, 5) from t1")
+        tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, bottom(t1, 1) from t1")
+        tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+        tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+        # --- top: mirror of the bottom checks above.
+        tdSql.error("select ts as t, top(c1, 0) from t1")
+        tdSql.query("select ts as t, top(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, top(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, top(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, top(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c3, 5) from t1")
+        tdSql.error("select ts as t, top(c4, 5) from t1")
+        tdSql.query("select ts as t, top(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c6, 5) from t1")
+        tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, top(t1, 1) from t1")
+        tdSql.error("select ts as t, top(t1, 1) from stb1")
+        tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+        # Restart the dnode, then repeat every assertion: the duplication below is
+        # deliberate — it verifies the same results after WAL replay.
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.error("select ts as t, diff(c1) from stb1")
+        tdSql.query("select ts as t, diff(c2) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c3) from t1")
+        tdSql.error("select ts as t, diff(c4) from t1")
+        tdSql.query("select ts as t, diff(c5) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c6) from t1")
+        tdSql.error("select ts as t, diff(t1) from t1")
+        tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+        tdSql.error("select ts as t, bottom(c1, 0) from t1")
+        tdSql.query("select ts as t, bottom(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, bottom(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, bottom(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c3, 5) from t1")
+        tdSql.error("select ts as t, bottom(c4, 5) from t1")
+        tdSql.query("select ts as t, bottom(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c6, 5) from t1")
+        tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, bottom(t1, 1) from t1")
+        tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+        tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+        tdSql.error("select ts as t, top(c1, 0) from t1")
+        tdSql.query("select ts as t, top(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, top(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, top(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, top(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c3, 5) from t1")
+        tdSql.error("select ts as t, top(c4, 5) from t1")
+        tdSql.query("select ts as t, top(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c6, 5) from t1")
+        tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, top(t1, 1) from t1")
+        tdSql.error("select ts as t, top(t1, 1) from stb1")
+        tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+        pass
+
+    def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+        '''
+        Build an APERCENTILE query string from its parts (no execution).
+        :param col: string, column name, required parameter;
+        :param p: float, percentile in [0, 100], required parameter;
+        :param com: string, separator between p and algo (',' or '' to omit algo);
+        :param algo: string, algorithm literal including its quotes, e.g. "'t-digest'";
+        :param alias: string, extra select-list text (alias or additional functions, e.g. ', last(col)');
+        :param table_expr: string or expression, data source (table/stable name or subquery), required parameter;
+        :param condition: string, trailing clause (where/group by/limit ...);
+        :return: apercentile query statement, default: select apercentile(c1, 0, 't-digest')  from t1
+        '''
+
+        return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+    def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+        # Cross-check apercentile against exact percentile / numpy.percentile.
+        # Tolerance: 10% relative deviation for large values, otherwise the
+        # deviation is normalized by 2% of the column spread and must stay < 0.5.
+
+        # Empty result set: apercentile must also return zero rows.
+        tdSql.query(f"select count({col}) from {table_expr} {condition}")
+        if tdSql.queryRows == 0:
+            tdSql.query(self.apercentile_query_form(
+                col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+            ))
+            tdSql.checkRows(0)
+            return
+
+        # Always probe the fixed percentiles plus the caller-requested one.
+        pset = [0, 40, 60, 100]
+        if p not in pset:
+            pset.append(p)
+
+        # spread_num scales the tolerance; for plain tables average the per-table
+        # spreads (col may be qualified like "t1.c1", hence the split on '.').
+        if "stb" in table_expr:
+            tdSql.query(f"select spread({col}) from stb1")
+        else:
+            tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+        spread_num = tdSql.getData(0, 0)
+
+        for pi in pset:
+
+            if "group" in condition:
+                # Grouped query: compare each group's apercentile row against an
+                # exact percentile computed per child table.
+                tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+                query_result = tdSql.queryResult
+                query_rows = tdSql.queryRows
+                for i in range(query_rows):
+                    # Translate the stable-level condition into a per-table one
+                    # (slimit->limit, drop group by, drop soffset and after).
+                    pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+                    tbname = query_result[i][-1]
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+                    print(tdSql.sql)
+                    pre_data = tdSql.getData(0, 0)
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                    if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+                        tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+                    else:
+                        # Small values: normalize the absolute error by 2% of spread.
+                        # NOTE(review): messages say "expect deviation: 0.01" but the
+                        # actual threshold used is 0.5 — confirm which is intended.
+                        devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+                        if devia < 0.5:
+                            tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                       f"actual deviation:{devia} <= expect deviation: 0.01")
+                        else:
+                            tdLog.exit(
+                                f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                                f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                f"actual deviation:{devia} > expect deviation: 0.01")
+
+            # if "group" in condition:
+            #     tdSql.query(self.apercentile_query_form(
+            #         col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+            #     ))
+            #     query_result = tdSql.queryResult
+            #     query_rows = tdSql.queryRows
+            #     tdSql.query(self.apercentile_query_form(
+            #         col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+            #     ))
+            #     for i in range(query_rows):
+            #         if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+            #             tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+            #         else:
+            #             devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+            #             if devia < 0.5:
+            #                 tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+            #                            f"actual deviation:{devia} <= expect deviation: 0.01")
+            #             else:
+            #                 tdLog.exit(
+            #                     f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+            #                     f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+            #                     f"actual deviation:{devia} > expect deviation: 0.01")
+
+            else:
+                # Ungrouped: build a reference result set, then compare the
+                # apercentile value against numpy.percentile of the raw column.
+                if ',' in alias or not alias:
+                    tdSql.query(f"select {col} from {table_expr} {condition}")
+                elif "stb" not in table_expr:
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+                else:
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                # Drop NULLs before computing the reference percentile.
+                query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+                tdSql.query(self.apercentile_query_form(
+                    col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+                ))
+
+                if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+                    tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+                else:
+                    devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+                    if devia < 0.5:
+                        tdLog.info(
+                            f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} <= expect deviation: 0.01")
+                    else:
+                        tdLog.exit(
+                            f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                            f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+    def apercentile_query(self):
+        # Positive-path matrix for apercentile: every supported column type,
+        # table/stable/subquery sources, mixed select lists, and clauses.
+        # Each case delegates the actual verification to self.checkapert.
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+        # case1: int col
+        self.checkapert()
+        # case2: float col
+        case2 = {'col':'c2'}
+        self.checkapert(**case2)
+        # case3: double col
+        case3 = {'col':'c5'}
+        self.checkapert(**case3)
+        # case4: bigint col
+        case4 = {'col':'c7'}
+        self.checkapert(**case4)
+        # case5: smallint col
+        case5 = {'col':'c8'}
+        self.checkapert(**case5)
+        # case6: tinyint col
+        case6 = {'col':'c9'}
+        self.checkapert(**case6)
+        # case7: stable
+        case7 = {'table_expr':'stb1'}
+        self.checkapert(**case7)
+        # case8: nest query, outquery
+        case8 = {'table_expr':'(select c1 from t1)'}
+        self.checkapert(**case8)
+        # case9: nest query, inquery and out query
+        case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+        self.checkapert(**case9)
+
+        # case10: nest query, inquery — compared directly against numpy.percentile
+        tdSql.query("select * from (select c1 from stb1)")
+        if tdSql.queryRows == 0:
+            tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+            tdSql.checkRows(0)
+        else:
+            query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+            tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+            tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+            tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+        # case11: no algorithm argument at all (defaults server-side)
+        case11 = {'com':'', 'algo': ''}
+        self.checkapert(**case11)
+
+        # case12~14: p given as bin/hex literal, algorithm name case-insensitivity
+        case12 = {'p': 0b1100100}
+        self.checkapert(**case12)
+        case13 = {'algo':'"T-DIGEST"'}
+        self.checkapert(**case13)
+        case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+        self.checkapert(**case14)
+
+        # case15~21: mix with aggregate function
+        case15 = {'alias':', count(*)'}
+        self.checkapert(**case15)
+        case16 = {'alias':', avg(c1)'}
+        self.checkapert(**case16)
+        case17 = {'alias':', twa(c1)'}
+        self.checkapert(**case17)
+        case18 = {'alias':', irate(c1)'}
+        self.checkapert(**case18)
+        case19 = {'alias':', sum(c1)'}
+        self.checkapert(**case19)
+        case20 = {'alias':', stddev(c1)'}
+        self.checkapert(**case20)
+        case21 = {'alias':', leastsquares(c1, 1, 1)'}
+        self.checkapert(**case21)
+
+        # case22~27: mix with selector function
+        case22 = {'alias':', min(c1)'}
+        self.checkapert(**case22)
+        case23 = {'alias':', max(c1)'}
+        self.checkapert(**case23)
+        case24 = {'alias':', first(c1)'}
+        self.checkapert(**case24)
+        case25 = {'alias':', last(c1)'}
+        self.checkapert(**case25)
+        case26 = {'alias':', percentile(c1, 0)'}
+        self.checkapert(**case26)
+        case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+        self.checkapert(**case27)
+
+        # case28~29: mix with computing function
+        case28 = {'alias':', spread(c1)'}
+        self.checkapert(**case28)
+        # case29: mix with four operation (arithmetic on the result)
+        case29 = {'alias':'+ spread(c1)'}
+        self.checkapert(**case29)
+
+        # case30~36: with condition
+        case30 = {'condition':'where ts > now'}
+        self.checkapert(**case30)
+        case31 = {'condition':'where c1 between 1 and 200'}
+        self.checkapert(**case31)
+        case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+        self.checkapert(**case32)
+        case33 = {'condition':'where c1>100 and c2<100'}
+        self.checkapert(**case33)
+        case34 = {'condition':'where c1 is not null'}
+        self.checkapert(**case34)
+        case35 = {'condition':'where c4 like "_inary%"'}
+        self.checkapert(**case35)
+        case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+        self.checkapert(**case36)
+
+        # case37~38: with join
+        case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+        self.checkapert(**case37)
+        case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+        self.checkapert(**case38)
+
+        # case39: with group by
+        case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+        self.checkapert(**case39)
+
+        # case40: with slimit
+        case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+        self.checkapert(**case40)
+
+        # case41: with soffset
+        case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+        self.checkapert(**case41)
+
+        # case42~43: with order by
+        case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44~45: with limit offset
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+        # Negative-path matrix: every statement here must be rejected by the
+        # server. Grouped as: form errors, bad argument types, boundary values,
+        # and illegal function mixes.
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))             # no col , no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))                            # no col , algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))               # no p , no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))                              # no p , algorithm
+        tdSql.error("apercentile( c1, 100) from t1")                                # no select
+        tdSql.error("select apercentile from t1")                                   # no algorithm condition
+        tdSql.error("select apercentile c1,0 from t1")                              # no brackets
+        tdSql.error("select apercentile (c1,0) t1")                                 # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p , no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")                         # no table_expr
+        tdSql.error("select apercentile{ (c1,0) } from t1")                         # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ] from t1")                         # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ] from t1")                          # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")               # sql form error 5
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")               # sql form error 6
+        tdSql.error("select apercentile(c1, (0), 1) from t1")                       # sql form error 7
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")               # sql form error 8
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")               # sql form error 9
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")               # sql form error 10
+        tdSql.error("select apercentile([c1, 0]) from t1")                          # sql form error 11
+        tdSql.error("select apercentile({c1, 0}) from t1")                          # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")                               # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")              # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")              # args: 4
+        tdSql.error("select apercentile() from t1")                                 # args: null 1
+        tdSql.error("select apercentile from t1")                                   # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")                            # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))             # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))                         # col:tag column
+        tdSql.error(self.apercentile_query_form(col=123))                           # col:numerical
+        tdSql.error(self.apercentile_query_form(col=True))                          # col:bool
+        tdSql.error(self.apercentile_query_form(col=''))                            # col:''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))                    # col:expr
+        tdSql.error(self.apercentile_query_form(col="t%"))                          # col:non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))                          # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))                          # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))                          # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))                         # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))                            # p:bool
+        tdSql.error(self.apercentile_query_form(p='a'))                             # p:str
+        tdSql.error(self.apercentile_query_form(p='last(*)'))                       # p:expr
+        tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))       # p:timestamp
+        tdSql.error(self.apercentile_query_form(algo='t-digest'))                   # algorithm:unquoted str
+        tdSql.error(self.apercentile_query_form(algo='"t_digest"'))                 # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))                # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest."'))                # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))                # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))                # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='tdigest'))                    # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo=2.0))                          # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=1.9999))                       # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=-0.9999))                      # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=-1.0))                         # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=0b1))                          # algorithm:bin literal
+        tdSql.error(self.apercentile_query_form(algo=0x1))                          # algorithm:hex literal
+        tdSql.error(self.apercentile_query_form(algo=0o1))                          # algorithm:oct literal
+        tdSql.error(self.apercentile_query_form(algo=True))                         # algorithm:bool
+        tdSql.error(self.apercentile_query_form(algo="True"))                       # algorithm:bool-like str
+        tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000'))    # algorithm:timestamp
+        tdSql.error(self.apercentile_query_form(algo='last(c1)'))                   # algorithm:expr
+
+        # boundary test
+        tdSql.error(self.apercentile_query_form(p=-1))                              # p left out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=-9223372036854775809))            # p left out of bigint
+        tdSql.error(self.apercentile_query_form(p=100.1))                           # p right out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=18446744073709551616))            # p right out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=-1))                           # algorithm left out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809))         # algorithm left out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=2))                            # algorithm right out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=18446744073709551616))         # algorithm right out of unsigned-bigint
+
+        # mix function test
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))               # mix with top function
+        # NOTE(review): this line repeats top(c1,1) but the comment says bottom —
+        # likely intended to be bottom(c1,1); confirm and fix in a follow-up.
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))               # mix with bottom function
+        tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))            # mix with last_row function
+        tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))            # mix with distinct function
+        tdSql.error(self.apercentile_query_form(alias=', *'))                       # mix with *
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))                # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"'))  # mix with interp function
+        tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)'))  # mix with derivative function
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))                # mix with diff function (duplicate of above)
+        tdSql.error(self.apercentile_query_form(alias='+ c1)'))                     # mix with four operation
+
+    def apercentile_data(self, tbnum, data_row, basetime):
+        # Populate t{i} (stb1 children) and tt{i} (stb2 children) with data_row
+        # pairs of rows per table: one row with negative values at/after basetime
+        # and one with positive values before basetime, so percentile inputs span
+        # both signs.
+        # :param tbnum: int, number of child tables per stable;
+        # :param data_row: int, row pairs inserted per table;
+        # :param basetime: int, epoch ms around which timestamps are generated.
+        for i in range(tbnum):
+            for j in range(data_row):
+                # Negative-valued row at basetime + j*10ms.
+                # NOTE(review): random.uniform(200, -1) has reversed bounds — it
+                # still yields a value in [-1, 200]; confirm the intended range.
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                # Positive-valued row at basetime - (j+1)*10ms.
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                # Matching single-column row for the stb2 child (join test data).
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
+    def td6108(self):
+        # TD-6108: full apercentile verification — runs the positive and negative
+        # query matrices four times: empty tables, populated tables, tables with
+        # NULL rows, and after a dnode restart (WAL replay).
+        tdLog.printNoPrefix("==========TD-6108==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+                ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+                c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+                ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        tbnum = 10
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        tdLog.printNoPrefix("######## no data test:")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data test:")
+        nowtime = int(round(time.time() * 1000))
+        per_table_rows = 1000
+        self.apercentile_data(tbnum, per_table_rows, nowtime)
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data with NULL test:")
+        # Two timestamp-only rows straddling nowtime; all data columns NULL.
+        tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## check after WAL test:")
+        # Restart the first listed dnode so results must be rebuilt from WAL.
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        self.apercentile_query()
+        self.error_apercentile()
+
+
+    def run(self):
+        # Test entry point: only the currently targeted case is enabled; the
+        # rest stay commented for branch-specific runs.
+
+        # master branch
+
+        # self.td5168()
+        # self.td5433()
+        # self.td5798()
+
+        # develop branch
+        # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+        # NOTE(review): td5798 is not defined in this file's visible methods
+        # (td5935/td6068/td6108 are) — confirm this is the intended call.
+        self.td5798()
+
+    def stop(self):
+        # Framework teardown: release the SQL connection and report success.
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td5935.py b/tests/pytest/functions/queryTestCases-td5935.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3e925a400f6b0850753697b9766e14f6c0faac8
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td5935.py
@@ -0,0 +1,1587 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ """Framework hook: bind the taos connection cursor to the tdSql helper."""
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ """Walk the project tree to locate the build root (the dir containing debug/build/bin with taosd)."""
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ # project root differs between the community repo and the enterprise layout
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ # strip the trailing "/debug/build/bin" to yield the build root
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ # NOTE(review): buildPath is unbound if no taosd binary is found — assumes the build exists
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ """Return the cfg directory of dnode1 under the simulator tree."""
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ """Full path of dnode1's taos.cfg."""
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ """TD-3690: verify the default of offlineThreshold (864000) in 'show variables'."""
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ # locate the row index of "offlineThreshold" in the result set
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ """TD-4082: compressMsgSize config — default is -1; a valid value set in taos.cfg
+ takes effect after restart; an out-of-range value falls back to -1."""
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ # locate the row index of "compressMsgSize" in the result set
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ # append the option to the last line of taos.cfg while the dnode is stopped
+ # NOTE(review): "compressMSgSize" casing differs from the variable name — presumably
+ # config parsing is case-insensitive; confirm against the server implementation
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ # rewrite the appended line with an out-of-range value (max+10)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ # remove the appended line to restore the original cfg
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ """TD-4097: exhaustive positive/negative coverage of
+ 'show create database/stable/table', with and without a current database,
+ and after dropping tables/databases."""
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+ # insert data, then run the various "show create" statements
+
+ # p1: without switching into a specific database
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2: after switching into the database
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3: query again after dropping tables/databases
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ # clean up all databases created for this case
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ """TD-4153: placeholder — no checks implemented yet."""
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ """TD-4288: validate the 'keep' option — default 3650, invalid alter values
+ rejected, valid alter takes effect; community build reports a keep triple."""
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ # locate the row index of "keep" in the result set
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.query("show databases")
+ # community build shows keep as "keep0,keep1,keep2"; enterprise shows a single int
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ days = tdSql.getData(0, 6)
+ # invalid keep values: out of range, below days, malformed lists/literals
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ # a freshly created database falls back to the default keep
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ """TD-4724: with small min/maxTablesPerVnode, creating 100 tables must
+ spread them across more than one vnode."""
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ # append the three options to taos.cfg while the dnode is stopped
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ # build one multi-table insert statement while creating the tables
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ # remove the three appended cfg lines (run '$d' three times) and restart
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ """TD-4889: after bulk inserts, 'compact vnodes in(...)' must flip the
+ compacting flag (show vgroups col 6) within 3 seconds."""
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ # redeploy the dnode with small file-block limits so compaction has work to do
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ # 1000 tables x 260 rows each, inserted one multi-value statement per table
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ # poll until the compacting flag changes, or bail out after 3 seconds
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compacting not occured")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ """Helper for td5168: recreate db.t1 (4 float/double columns), insert 5
+ fixed baseline rows plus 10000 random rows in [1000, 1001]."""
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ # 5 deterministic baseline rows at fixed timestamps
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ """TD-5168: lossy compression (lossyColumns) — baseline values must still
+ read back exactly, and data size must shrink for float, double, and
+ float|double settings compared to compression disabled."""
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # insert random values within a small range
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # capture the 5 baseline timestamps as reference values; with compression
+ # disabled they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # stash each column value as a local f<j><i> for later comparison
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # stop the service and measure the data size with lossy compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ # each variant first deletes the previously appended line ('$d'), then appends its own
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # enable lossy compression for float, restart the service and insert data
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the 5 baseline timestamps and compare against the reference values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=float
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to double and restart the service
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the 5 baseline timestamps and compare against the reference values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=double
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # switch lossy compression to float|double and restart the service
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the 5 baseline timestamps and compare against the reference values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns=float|double
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ # every lossy setting must produce a smaller data size than no compression
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ """TD-5433: 'select distinct <tag>' with numeric and binary tag types,
+ filtered by quoted and unquoted literals, must return consistent row counts."""
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ # stb1: numeric tags (tinyint/int); stb2: binary tags
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ numtab=20000
+ # t0 cycles over 128 values; t1 is unique per table
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ # int tag: quoted and unquoted literals must behave identically
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ # tinyint tag: 128 distinct values
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ # binary tags: same checks against stb2's 5 tables
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+ #========== TD-5810 suport distinct multi-data-coloumn ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+        #========== TD-5798 support distinct multi-tags-column ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+    def td5935(self):
+        # Regression for TD-5935/TD-5933/TD-5978: build 100 child tables of stb1,
+        # each with 1000 rows plus one all-NULL row, then verify interval queries.
+        tdLog.printNoPrefix("==========TD-5935==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+        nowtime=int(round((time.time()*1000)))  # epoch milliseconds, used as the newest ts
+        for i in range(100):
+            sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+            tdSql.execute(sql)
+            for j in range(1000):
+                # rows go backwards in time at 10ms steps; c1 descends from 1000
+                tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+            # one row per table with only ts set (c1/c2 NULL) at the oldest timestamp
+            tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+        ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
+        stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+        tdSql.query(stddevAndIntervalSql)
+        tdSql.checkRows(10)
+
+        ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
+        fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+        tdSql.query(fillsql)
+        fillResult=False
+        # first window starts at the NULL row; fill(next) must yield a real (non-zero,
+        # non-NULL) value taken from the next window, not 0
+        if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+            fillResult=True
+        if fillResult:
+            tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+        else:
+            tdLog.exit("fill(next) is wrong")
+
+        pass
+
+    def td6068(self):
+        # Regression for TD-6068: "select ts as <alias>, <func>(...)" must work for
+        # derivative/diff/top/bottom, and must keep working after a dnode restart.
+        tdLog.printNoPrefix("==========TD-6068==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+        for i in range(100):
+            sql = f"create table db.t{i} using db.stb1 tags({i})"
+            tdSql.execute(sql)
+            # six full rows (hourly, alternating bool) plus three ts-only rows (all-NULL columns)
+            tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+        # ---- derivative with ts alias: numeric cols OK, ts/binary/bool/tag rejected ----
+        tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            # aliased ts column must equal the function's own timestamp column
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+        tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+        tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+        tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+        tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+        tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+        tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+        # ---- diff with ts alias ----
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.error("select ts as t, diff(c1) from stb1")
+        tdSql.query("select ts as t, diff(c2) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c3) from t1")
+        tdSql.error("select ts as t, diff(c4) from t1")
+        tdSql.query("select ts as t, diff(c5) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c6) from t1")
+        tdSql.error("select ts as t, diff(t1) from t1")
+        tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+        # ---- bottom with ts alias; k=8 caps at 6 because only 6 rows are non-NULL ----
+        tdSql.error("select ts as t, bottom(c1, 0) from t1")
+        tdSql.query("select ts as t, bottom(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, bottom(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, bottom(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c3, 5) from t1")
+        tdSql.error("select ts as t, bottom(c4, 5) from t1")
+        tdSql.query("select ts as t, bottom(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c6, 5) from t1")
+        tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, bottom(t1, 1) from t1")
+        tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+        tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+        # ---- top with ts alias (mirror of the bottom checks) ----
+        tdSql.error("select ts as t, top(c1, 0) from t1")
+        tdSql.query("select ts as t, top(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, top(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, top(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, top(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c3, 5) from t1")
+        tdSql.error("select ts as t, top(c4, 5) from t1")
+        tdSql.query("select ts as t, top(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c6, 5) from t1")
+        tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, top(t1, 1) from t1")
+        tdSql.error("select ts as t, top(t1, 1) from stb1")
+        tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+        # restart the dnode and re-run the diff/bottom/top checks to verify the
+        # behavior survives recovery from WAL/disk
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.error("select ts as t, diff(c1) from stb1")
+        tdSql.query("select ts as t, diff(c2) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c3) from t1")
+        tdSql.error("select ts as t, diff(c4) from t1")
+        tdSql.query("select ts as t, diff(c5) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c6) from t1")
+        tdSql.error("select ts as t, diff(t1) from t1")
+        tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+        tdSql.error("select ts as t, bottom(c1, 0) from t1")
+        tdSql.query("select ts as t, bottom(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, bottom(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, bottom(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c3, 5) from t1")
+        tdSql.error("select ts as t, bottom(c4, 5) from t1")
+        tdSql.query("select ts as t, bottom(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c6, 5) from t1")
+        tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, bottom(t1, 1) from t1")
+        tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+        tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+        tdSql.error("select ts as t, top(c1, 0) from t1")
+        tdSql.query("select ts as t, top(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, top(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, top(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, top(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c3, 5) from t1")
+        tdSql.error("select ts as t, top(c4, 5) from t1")
+        tdSql.query("select ts as t, top(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c6, 5) from t1")
+        tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, top(t1, 1) from t1")
+        tdSql.error("select ts as t, top(t1, 1) from stb1")
+        tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+        pass
+
+    def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+        '''
+        Build an apercentile SQL statement from its parts.
+        :param col: string, column name, required parameters;
+        :param p: float, percentile interval, [0,100], required parameters;
+        :param com: string, separator between p and algo (set com='' together with algo='' to omit the algorithm argument);
+        :param algo: string, algorithm, rendered verbatim after the separator, e.g. "'t-digest'" or '"default"';
+        :param alias: string, result column another name, or extra select expressions (e.g. ', count(*)');
+        :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
+        :param condition: expression appended after the table expression (where/group by/limit ...);
+        :return: apercentile query statement; with the defaults above:
+                 select apercentile(c1, 0, 't-digest')  from t1
+        '''
+
+        return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+    def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+        # Verify apercentile(col, p, algo) against a reference value:
+        #   - grouped queries: compare per-table against percentile() on each child table;
+        #   - ungrouped: compare against numpy.percentile over the raw column data.
+        # Small results (|value| < 2% of the column spread) use a looser absolute
+        # deviation bound instead of the relative one.
+
+        tdSql.query(f"select count({col}) from {table_expr} {condition}")
+        if tdSql.queryRows == 0:
+            # no data at all: apercentile must return an empty result set
+            tdSql.query(self.apercentile_query_form(
+                col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+            ))
+            tdSql.checkRows(0)
+            return
+
+        # always exercise the boundary/typical percentiles plus the caller's p
+        pset = [0, 40, 60, 100]
+        if p not in pset:
+            pset.append(p)
+
+        # spread of the column, used to scale the accepted deviation
+        if "stb" in table_expr:
+            tdSql.query(f"select spread({col}) from stb1")
+        else:
+            tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+        spread_num = tdSql.getData(0, 0)
+
+        for pi in pset:
+
+            if "group" in condition:
+                # last_row() here is only used to enumerate the group-by result rows
+                # (the table name is the last column of each row)
+                tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+                query_result = tdSql.queryResult
+                query_rows = tdSql.queryRows
+                for i in range(query_rows):
+                    # strip group-by/slimit/soffset so percentile() can run on one child table
+                    pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+                    tbname = query_result[i][-1]
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+                    print(tdSql.sql)
+                    pre_data = tdSql.getData(0, 0)
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                    if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+                        tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+                    else:
+                        # value too small for a relative check: allow up to 1% of spread
+                        devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+                        if devia < 0.5:
+                            tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                       f"actual deviation:{devia} <= expect deviation: 0.01")
+                        else:
+                            tdLog.exit(
+                                f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                                f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                f"actual deviation:{devia} > expect deviation: 0.01")
+
+            # if "group" in condition:
+            #     tdSql.query(self.apercentile_query_form(
+            #         col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+            #     ))
+            #     query_result = tdSql.queryResult
+            #     query_rows = tdSql.queryRows
+            #     tdSql.query(self.apercentile_query_form(
+            #         col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+            #     ))
+            #     for i in range(query_rows):
+            #         if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+            #             tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+            #         else:
+            #             devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+            #             if devia < 0.5:
+            #                 tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+            #                            f"actual deviation:{devia} <= expect deviation: 0.01")
+            #             else:
+            #                 tdLog.exit(
+            #                     f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+            #                     f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+            #                     f"actual deviation:{devia} > expect deviation: 0.01")
+
+            else:
+                # pick the reference query whose raw rows feed numpy.percentile
+                if ',' in alias or not alias:
+                    tdSql.query(f"select {col} from {table_expr} {condition}")
+                elif "stb" not in table_expr:
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+                else:
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                # flatten and drop NULLs before computing the numpy reference
+                query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+                tdSql.query(self.apercentile_query_form(
+                    col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+                ))
+
+                if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+                    tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+                else:
+                    devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+                    if devia < 0.5:
+                        tdLog.info(
+                            f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} <= expect deviation: 0.01")
+                    else:
+                        tdLog.exit(
+                            f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                            f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+    def apercentile_query(self):
+        # Positive-path matrix for apercentile: column types, stables, nested
+        # queries, algorithm spellings, mixes with other functions, and clauses.
+        # Each case is a kwargs dict forwarded to checkapert().
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+        # case1: int col
+        self.checkapert()
+        # case2: float col
+        case2 = {'col':'c2'}
+        self.checkapert(**case2)
+        # case3: double col
+        case3 = {'col':'c5'}
+        self.checkapert(**case3)
+        # case4: bigint col
+        case4 = {'col':'c7'}
+        self.checkapert(**case4)
+        # case5: smallint col
+        case5 = {'col':'c8'}
+        self.checkapert(**case5)
+        # case6: tinyint col
+        case6 = {'col':'c9'}
+        self.checkapert(**case6)
+        # case7: stable
+        case7 = {'table_expr':'stb1'}
+        self.checkapert(**case7)
+        # case8: nest query, outquery
+        case8 = {'table_expr':'(select c1 from t1)'}
+        self.checkapert(**case8)
+        # case9: nest query, inquery and out query
+        case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+        self.checkapert(**case9)
+
+        # case10: nest query, inquery
+        tdSql.query("select * from (select c1 from stb1)")
+        if tdSql.queryRows == 0:
+            tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+            tdSql.checkRows(0)
+        else:
+            query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+            tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+            tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+            tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+        # case11: no algorithm = algo:0
+        case11 = {'com':'', 'algo': ''}
+        self.checkapert(**case11)
+
+        # case12~14: p given as bin/hex literal; algorithm name case-insensitivity
+        case12 = {'p': 0b1100100}
+        self.checkapert(**case12)
+        case13 = {'algo':'"T-DIGEST"'}
+        self.checkapert(**case13)
+        case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+        self.checkapert(**case14)
+
+        # case15~21: mix with aggregate function
+        case15 = {'alias':', count(*)'}
+        self.checkapert(**case15)
+        case16 = {'alias':', avg(c1)'}
+        self.checkapert(**case16)
+        case17 = {'alias':', twa(c1)'}
+        self.checkapert(**case17)
+        case18 = {'alias':', irate(c1)'}
+        self.checkapert(**case18)
+        case19 = {'alias':', sum(c1)'}
+        self.checkapert(**case19)
+        case20 = {'alias':', stddev(c1)'}
+        self.checkapert(**case20)
+        case21 = {'alias':', leastsquares(c1, 1, 1)'}
+        self.checkapert(**case21)
+
+        # case22~27:mix with selector function
+        case22 = {'alias':', min(c1)'}
+        self.checkapert(**case22)
+        case23 = {'alias':', max(c1)'}
+        self.checkapert(**case23)
+        case24 = {'alias':', first(c1)'}
+        self.checkapert(**case24)
+        case25 = {'alias':', last(c1)'}
+        self.checkapert(**case25)
+        case26 = {'alias':', percentile(c1, 0)'}
+        self.checkapert(**case26)
+        case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+        self.checkapert(**case27)
+
+        # case28~29: mix with computing function
+        case28 = {'alias':', spread(c1)'}
+        self.checkapert(**case28)
+        # case29: mix with four operation
+        case29 = {'alias':'+ spread(c1)'}
+        self.checkapert(**case29)
+
+        # case30~36: with condition
+        case30 = {'condition':'where ts > now'}
+        self.checkapert(**case30)
+        case31 = {'condition':'where c1 between 1 and 200'}
+        self.checkapert(**case31)
+        case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+        self.checkapert(**case32)
+        case33 = {'condition':'where c1>100 and c2<100'}
+        self.checkapert(**case33)
+        case34 = {'condition':'where c1 is not null'}
+        self.checkapert(**case34)
+        case35 = {'condition':'where c4 like "_inary%"'}
+        self.checkapert(**case35)
+        case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+        self.checkapert(**case36)
+
+        # case37~38: with join
+        case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+        self.checkapert(**case37)
+        case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+        self.checkapert(**case38)
+
+        # case39: with group by
+        case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+        self.checkapert(**case39)
+
+        # case40: with slimit
+        case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+        self.checkapert(**case40)
+
+        # case41: with soffset
+        case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+        self.checkapert(**case41)
+
+        # case42: with order by
+        case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44: with limit offset
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+        # Negative-path matrix for apercentile: malformed SQL forms, bad column/
+        # percentile/algorithm arguments, out-of-range boundaries, and illegal
+        # mixes with other functions. Every statement must be rejected.
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+        #
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))          # no col , no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))                         # no col , algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))            # no p , no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))                           # no p , algorithm
+        tdSql.error("apercentile( c1, 100) from t1")                             # no select
+        tdSql.error("select apercentile from t1")                                # no algorithm condition
+        tdSql.error("select apercentile c1,0 from t1")                           # no brackets
+        tdSql.error("select apercentile (c1,0) t1")                              # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo='')) # no p , no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")                      # no table_expr
+        tdSql.error("select apercentile{ (c1,0) }  from t1")                     # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ]  from t1")                     # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ]  from t1")                      # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")            # sql form error 5
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")            # sql form error 6
+        tdSql.error("select apercentile(c1, (0), 1) from t1")                    # sql form error 7
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")            # sql form error 8
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")            # sql form error 9
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")            # sql form error 10
+        tdSql.error("select apercentile([c1, 0]) from t1")                       # sql form error 11
+        tdSql.error("select apercentile({c1, 0}) from t1")                       # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")                            # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")           # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")           # args: 4
+        tdSql.error("select apercentile() from t1")                              # args: null 1
+        tdSql.error("select apercentile from t1")                                # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")                         # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))          # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))                      # col:tag column
+        tdSql.error(self.apercentile_query_form(col=123))                        # col:numerical
+        tdSql.error(self.apercentile_query_form(col=True))                       # col:bool
+        tdSql.error(self.apercentile_query_form(col=''))                         # col:''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))                 # col:expr
+        tdSql.error(self.apercentile_query_form(col="t%"))                       # col:non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))                       # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))                       # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))                       # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))                      # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))                         # p:bool
+        tdSql.error(self.apercentile_query_form(p='a'))                          # p:str
+        tdSql.error(self.apercentile_query_form(p='last(*)'))                    # p:expr
+        tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))    # p:timestamp
+        tdSql.error(self.apercentile_query_form(algo='t-digest'))                # algorithm: unquoted name
+        tdSql.error(self.apercentile_query_form(algo='"t_digest"'))              # algorithm: wrong name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))             # algorithm: wrong name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest."'))             # algorithm: wrong name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))             # algorithm: wrong name
+        tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))             # algorithm: wrong name
+        tdSql.error(self.apercentile_query_form(algo='tdigest'))                 # algorithm: unquoted name
+        tdSql.error(self.apercentile_query_form(algo=2.0))                       # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=1.9999))                    # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=-0.9999))                   # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=-1.0))                      # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=0b1))                       # algorithm: bin literal
+        tdSql.error(self.apercentile_query_form(algo=0x1))                       # algorithm: hex literal
+        tdSql.error(self.apercentile_query_form(algo=0o1))                       # algorithm: oct literal
+        tdSql.error(self.apercentile_query_form(algo=True))                      # algorithm:bool
+        tdSql.error(self.apercentile_query_form(algo="True"))                    # algorithm:bool
+        tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000')) # algorithm:timestamp
+        tdSql.error(self.apercentile_query_form(algo='last(c1)'))                # algorithm:expr
+
+        # boundary test
+        tdSql.error(self.apercentile_query_form(p=-1))                           # p left out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=-9223372036854775809))         # p left out of bigint
+        tdSql.error(self.apercentile_query_form(p=100.1))                        # p right out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=18446744073709551616))         # p right out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=-1))                        # algorithm left out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809))      # algorithm left out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=2))                         # algorithm right out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=18446744073709551616))      # algorithm right out of unsigned-bigint
+
+        # mix function test
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))            # mix with top function
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))            # mix with bottom function
+        tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))         # mix with last_row function
+        tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))         # mix with distinct function
+        tdSql.error(self.apercentile_query_form(alias=', *'))                    # mix with *
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))             # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"')) # mix with interp function
+        tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)')) # mix with derivative function
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))             # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias='+ c1)'))                  # mix with four operation
+
+    def apercentile_data(self, tbnum, data_row, basetime):
+        # Populate fixtures for the apercentile tests.
+        # :param tbnum: number of child tables (t{i} of stb1, tt{i} of stb2)
+        # :param data_row: rows inserted per table on each side of basetime
+        # :param basetime: pivot timestamp (ms); negative-valued rows go after it,
+        #                  positive-valued rows before it
+        for i in range(tbnum):
+            for j in range(data_row):
+                # forward in time: all numeric columns strictly negative
+                # (note: random.uniform(200, -1) is valid — uniform accepts
+                # reversed bounds and samples the same interval)
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                # backward in time: all numeric columns strictly positive
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                # second stable gets one row per iteration (used by join cases)
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
+    def td6108(self):
+        # Regression for TD-6108: run the full apercentile positive/negative
+        # matrix against four states — empty tables, populated tables, tables
+        # containing NULL rows, and after a dnode restart (WAL recovery).
+        tdLog.printNoPrefix("==========TD-6108==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+                ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+                c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+                ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        tbnum = 10
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        tdLog.printNoPrefix("######## no data test:")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data test:")
+        nowtime = int(round(time.time() * 1000))
+        per_table_rows = 1000
+        self.apercentile_data(tbnum, per_table_rows, nowtime)
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data with NULL test:")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## check after WAL test:")
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)  # dnode id of the first listed dnode
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        self.apercentile_query()
+        self.error_apercentile()
+
+
+    def run(self):
+        # Test-harness entry point; only td5935 is currently enabled here.
+
+        self.td5935()
+
+        # self.td5168()
+        # self.td5433()
+        # self.td5798()
+
+        # develop branch
+        # self.td4889()  In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+    def stop(self):
+        # Harness teardown: release the SQL connection and report success.
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases-td6068.py b/tests/pytest/functions/queryTestCases-td6068.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c3ffd998df391dd7f870fc73fcad01f484a914a
--- /dev/null
+++ b/tests/pytest/functions/queryTestCases-td6068.py
@@ -0,0 +1,1588 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import subprocess
+import random
+import math
+import numpy as np
+import inspect
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug(f"start to execute {__file__}")
+ tdSql.init(conn.cursor(), logSql)
+
+ def getBuildPath(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/debug/build/bin")]
+ break
+ return buildPath
+
+ def getCfgDir(self) -> str:
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ cfgDir = self.getBuildPath() + "/community/sim/dnode1/cfg"
+ else:
+ cfgDir = self.getBuildPath() + "/sim/dnode1/cfg"
+ return cfgDir
+
+ def getCfgFile(self) -> str:
+ return self.getCfgDir()+"/taos.cfg"
+
+ def td3690(self):
+ tdLog.printNoPrefix("==========TD-3690==========")
+
+ tdSql.prepare()
+
+ tdSql.execute("show variables")
+ res_off = tdSql.cursor.fetchall()
+ resList = np.array(res_off)
+ index = np.where(resList == "offlineThreshold")
+ index_value = np.dstack((index[0])).squeeze()
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 864000)
+
+ def td4082(self):
+ tdLog.printNoPrefix("==========TD-4082==========")
+ tdSql.prepare()
+
+ cfgfile = self.getCfgFile()
+ max_compressMsgSize = 100000000
+
+ tdSql.execute("show variables")
+ res_com = tdSql.cursor.fetchall()
+ rescomlist = np.array(res_com)
+ cpms_index = np.where(rescomlist == "compressMsgSize")
+ index_value = np.dstack((cpms_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$a compressMSgSize {max_compressMsgSize}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 100000000)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, -1)
+
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ def td4097(self):
+ tdLog.printNoPrefix("==========TD-4097==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("create database if not exists db1 keep 3650")
+ tdSql.execute("create database if not exists new keep 3650")
+ tdSql.execute("create database if not exists private keep 3650")
+ tdSql.execute("create database if not exists db2 keep 3650")
+
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t1 int)")
+ tdSql.execute("create stable db1.stb3 (ts timestamp, c1 int) tags(t1 int)")
+
+ tdSql.execute("create table db.t10 using db.stb1 tags(1)")
+ tdSql.execute("create table db.t11 using db.stb1 tags(2)")
+ tdSql.execute("create table db.t20 using db.stb2 tags(3)")
+ tdSql.execute("create table db1.t30 using db1.stb3 tags(4)")
+
+ # tdLog.printNoPrefix("==========TD-4097==========")
+ # Insert data, then perform the "show create" operations
+
+ # p1: without switching to (USE-ing) the target database
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db2")
+ tdSql.checkRows(1)
+ tdSql.query("show create database new")
+ tdSql.checkRows(1)
+ tdSql.query("show create database private")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stable stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table stb1")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.stb1, db.stb2")
+
+ # p2: after switching to the target database
+ tdSql.execute("use db")
+
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create database db1")
+ tdSql.checkRows(1)
+ tdSql.error("show create database ")
+ tdSql.error("show create databases db ")
+ tdSql.error("show create database db.stb1")
+ tdSql.error("show create database db0")
+ tdSql.error("show create database db db1")
+ tdSql.error("show create database db, db1")
+ tdSql.error("show create database stb1")
+ tdSql.error("show create database * ")
+
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db1.stb3")
+ tdSql.checkRows(1)
+ tdSql.error("show create stable db.t10")
+ tdSql.error("show create stable db")
+ tdSql.error("show create stable t10")
+ tdSql.error("show create stable db.stb0")
+ tdSql.error("show create stables stb1")
+ tdSql.error("show create stable ")
+ tdSql.error("show create stable *")
+ tdSql.error("show create stable db.stb1 db.stb2")
+ tdSql.error("show create stable stb1 stb2")
+ tdSql.error("show create stable db.stb1, db.stb2")
+ tdSql.error("show create stable stb1, stb2")
+
+ tdSql.query("show create table db.stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db.t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+ tdSql.query("show create table db1.t30")
+ tdSql.checkRows(1)
+ tdSql.error("show create table t30")
+ tdSql.error("show create table db.stb0")
+ tdSql.error("show create table db.t0")
+ tdSql.error("show create table db")
+ tdSql.error("show create tables stb1")
+ tdSql.error("show create tables t10")
+ tdSql.error("show create table ")
+ tdSql.error("show create table *")
+ tdSql.error("show create table db.stb1 db.stb2")
+ tdSql.error("show create table db.t11 db.t10")
+ tdSql.error("show create table db.stb1, db.stb2")
+ tdSql.error("show create table db.t11, db.t10")
+ tdSql.error("show create table stb1 stb2")
+ tdSql.error("show create table t11 t10")
+ tdSql.error("show create table stb1, stb2")
+ tdSql.error("show create table t11, t10")
+
+ # p3: query again after dropping the table/database
+ tdSql.execute("drop table if exists t11")
+
+ tdSql.error("show create table t11")
+ tdSql.error("show create table db.t11")
+ tdSql.query("show create stable stb1")
+ tdSql.checkRows(1)
+ tdSql.query("show create table t10")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop stable if exists stb2")
+
+ tdSql.error("show create table stb2")
+ tdSql.error("show create table db.stb2")
+ tdSql.error("show create stable stb2")
+ tdSql.error("show create stable db.stb2")
+ tdSql.error("show create stable db.t20")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db1")
+ tdSql.error("show create database db1")
+ tdSql.error("show create stable db1.t31")
+ tdSql.error("show create stable db1.stb3")
+ tdSql.query("show create database db")
+ tdSql.checkRows(1)
+ tdSql.query("show create stable db.stb1")
+ tdSql.checkRows(1)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("drop database if exists db1")
+ tdSql.execute("drop database if exists new")
+ tdSql.execute("drop database if exists db2")
+ tdSql.execute("drop database if exists private")
+
+ def td4153(self):
+ tdLog.printNoPrefix("==========TD-4153==========")
+
+ pass
+
+ def td4288(self):
+ tdLog.printNoPrefix("==========TD-4288==========")
+ # keep ~ [days,365000]
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db")
+
+ tdSql.execute("show variables")
+ res_kp = tdSql.cursor.fetchall()
+ resList = np.array(res_kp)
+ keep_index = np.where(resList == "keep")
+ index_value = np.dstack((keep_index[0])).squeeze()
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.query("show databases")
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ days = tdSql.getData(0, 6)
+ tdSql.error("alter database db keep 3650001")
+ tdSql.error("alter database db keep 9")
+ tdSql.error("alter database db keep 0b")
+ tdSql.error("alter database db keep 3650,9,36500")
+ tdSql.error("alter database db keep 3650,3650,365001")
+ tdSql.error("alter database db keep 36500,a,36500")
+ tdSql.error("alter database db keep (36500,3650,3650)")
+ tdSql.error("alter database db keep [36500,3650,36500]")
+ tdSql.error("alter database db keep 36500,0xff,3650")
+ tdSql.error("alter database db keep 36500,0o365,3650")
+ tdSql.error("alter database db keep 36500,0A3Ch,3650")
+ tdSql.error("alter database db keep")
+ tdSql.error("alter database db keep0 36500")
+
+ tdSql.execute("alter database db keep 36500")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "36500,36500,36500")
+ else:
+ tdSql.checkData(0, 7, 36500)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db1")
+ tdSql.query("show databases")
+ if ("community" in selfPath):
+ tdSql.checkData(0, 7, "3650,3650,3650")
+ else:
+ tdSql.checkData(0, 7, 3650)
+
+ tdSql.query("show variables")
+ tdSql.checkData(index_value, 1, 3650)
+
+ tdSql.execute("alter database db1 keep 365")
+ tdSql.execute("drop database if exists db1")
+
+
+ pass
+
+ def td4724(self):
+ tdLog.printNoPrefix("==========TD-4724==========")
+ cfgfile = self.getCfgFile()
+ minTablesPerVnode = 5
+ maxTablesPerVnode = 10
+ maxVgroupsPerDb = 100
+
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+
+ tdDnodes.stop(index)
+ vnode_cmd = f"sed -i '$a maxVgroupsPerDb {maxVgroupsPerDb}' {cfgfile} "
+ min_cmd = f"sed -i '$a minTablesPerVnode {minTablesPerVnode}' {cfgfile} "
+ max_cmd = f"sed -i '$a maxTablesPerVnode {maxTablesPerVnode}' {cfgfile} "
+ try:
+ _ = subprocess.check_output(vnode_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(min_cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(max_cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+ insert_sql = "insert into "
+ for i in range(100):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ insert_sql += f" t1{i} values({1604298064000 + i*1000}, {i})"
+ tdSql.query("show dnodes")
+ vnode_count = tdSql.getData(0, 2)
+ if vnode_count <= 1:
+ tdLog.exit("vnode is less than 2")
+
+ tdSql.execute(insert_sql)
+ tdDnodes.stop(index)
+ cmd = f"sed -i '$d' {cfgfile}"
+ try:
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ _ = subprocess.check_output(cmd, shell=True).decode("utf-8")
+ except Exception as e:
+ raise e
+
+ tdDnodes.start(index)
+
+ pass
+
+ def td4889(self):
+ tdLog.printNoPrefix("==========TD-4889==========")
+ cfg = {
+ 'minRowsPerFileBlock': '10',
+ 'maxRowsPerFileBlock': '200',
+ 'minRows': '10',
+ 'maxRows': '200',
+ 'maxVgroupsPerDb': '100',
+ 'maxTablesPerVnode': '1200',
+ }
+ tdSql.query("show dnodes")
+ dnode_index = tdSql.getData(0,0)
+ tdDnodes.stop(dnode_index)
+ tdDnodes.deploy(dnode_index, cfg)
+ tdDnodes.start(dnode_index)
+
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650 blocks 3 minrows 10 maxrows 200")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t1 int)")
+
+ nowtime = int(round(time.time() * 1000))
+ for i in range(1000):
+ tdSql.execute(f"create table db.t1{i} using db.stb1 tags({i})")
+ sql = f"insert into db.t1{i} values"
+ for j in range(260):
+ sql += f"({nowtime-1000*i-j}, {i+j})"
+ # tdSql.execute(f"insert into db.t1{i} values (now-100d, {i+j})")
+ tdSql.execute(sql)
+
+ # tdDnodes.stop(dnode_index)
+ # tdDnodes.start(dnode_index)
+
+ tdSql.query("show vgroups")
+ index = tdSql.getData(0,0)
+ tdSql.checkData(0, 6, 0)
+ tdSql.execute(f"compact vnodes in({index})")
+ start_time = time.time()
+ while True:
+ tdSql.query("show vgroups")
+ if tdSql.getData(0, 6) != 0:
+ tdLog.printNoPrefix("show vgroups row:0 col:6 data:1 == expect:1")
+ break
+ run_time = time.time()-start_time
+ if run_time > 3:
+ tdLog.exit("compacting not occured")
+ # time.sleep(0.1)
+
+ pass
+
+ def td5168insert(self):
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 float, c2 float, c3 double, c4 double) tags(t1 int)")
+ tdSql.execute("create table db.t1 using db.stb1 tags(1)")
+
+ for i in range(5):
+ c1 = 1001.11 + i*0.1
+ c2 = 1001.11 + i*0.1 + 1*0.01
+ c3 = 1001.11 + i*0.1 + 2*0.01
+ c4 = 1001.11 + i*0.1 + 3*0.01
+ tdSql.execute(f"insert into db.t1 values ('2021-07-01 08:00:0{i}.000', {c1}, {c2}, {c3}, {c4})")
+
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:00.000', 1001.11, 1001.12, 1001.13, 1001.14)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:01.000', 1001.21, 1001.22, 1001.23, 1001.24)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:02.000', 1001.31, 1001.32, 1001.33, 1001.34)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:03.000', 1001.41, 1001.42, 1001.43, 1001.44)")
+ # tdSql.execute("insert into db.t1 values ('2021-07-01 08:00:04.000', 1001.51, 1001.52, 1001.53, 1001.54)")
+
+ # for i in range(1000000):
+ for i in range(10000):
+ random1 = random.uniform(1000,1001)
+ random2 = random.uniform(1000,1001)
+ random3 = random.uniform(1000,1001)
+ random4 = random.uniform(1000,1001)
+ tdSql.execute(f"insert into db.t1 values (now+{i}a, {random1}, {random2},{random3}, {random4})")
+
+ pass
+
+ def td5168(self):
+ tdLog.printNoPrefix("==========TD-5168==========")
+ # insert random numbers within a small value range
+ tdLog.printNoPrefix("=====step0: 默认情况下插入数据========")
+ self.td5168insert()
+
+ # fetch the rows at five timestamps as baseline values; without compression they must match exactly
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # tdSql.query("select * from db.t1 limit 100,1")
+ # f10, f11, f12, f13 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000,1")
+ # f20, f21, f22, f23 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 10000,1")
+ # f30, f31, f32, f33 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 100000,1")
+ # f40, f41, f42, f43 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+ #
+ # tdSql.query("select * from db.t1 limit 1000000,1")
+ # f50, f51, f52, f53 = tdSql.getData(0,1), tdSql.getData(0,2), tdSql.getData(0,3), tdSql.getData(0,4)
+
+ # stop the service and measure the data size with lossy compression disabled
+ tdSql.query("show dnodes")
+ index = tdSql.getData(0, 0)
+ tdDnodes.stop(index)
+
+ cfgdir = self.getCfgDir()
+ cfgfile = self.getCfgFile()
+
+ lossy_cfg_cmd=f"grep lossyColumns {cfgfile}|awk '{{print $2}}'"
+ data_size_cmd = f"du -s {cfgdir}/../data/vnode/ | awk '{{print $1}}'"
+ dsize_init = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"close the lossyColumns,data size is: {dsize_init};the lossyColumns line is: {lossy_args}")
+
+ ###################################################
+ float_lossy = "float"
+ double_lossy = "double"
+ float_double_lossy = "float|double"
+ no_loosy = ""
+
+ double_precision_cmd = f"sed -i '$a dPrecision 0.000001' {cfgfile}"
+ _ = subprocess.check_output(double_precision_cmd, shell=True).decode("utf-8")
+
+ lossy_float_cmd = f"sed -i '$a lossyColumns {float_lossy}' {cfgfile} "
+ lossy_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {double_lossy}' {cfgfile} "
+ lossy_float_double_cmd = f"sed -i '$d' {cfgfile} && sed -i '$a lossyColumns {float_double_lossy}' {cfgfile} "
+ lossy_no_cmd = f"sed -i '$a lossyColumns {no_loosy}' {cfgfile} "
+
+ ###################################################
+
+ # enable lossy compression with value "float", then start the service and insert data
+ tdLog.printNoPrefix("=====step1: lossyColumns设置为float========")
+ lossy_float = subprocess.check_output(lossy_float_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # query the five timestamps mentioned above and compare against the baseline values
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ # c1, c2, c3, c4 = tdSql.getData(0, 1), tdSql.getData(0, 2), tdSql.getData(0, 3), tdSql.getData(0, 4)
+ for j in range(4):
+ # locals()["f" + str(j) + str(i)] = tdSql.getData(0, j+1)
+ # print(f"f{j}{i}:", locals()["f" + str(j) + str(i)])
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # stop the service and measure the data size with lossyColumns set to "float"
+ tdDnodes.stop(index)
+ dsize_float = int(subprocess.check_output(data_size_cmd,shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float};the lossyColumns line is: {lossy_args}")
+
+ # change lossy compression to "double" and start the service
+ tdLog.printNoPrefix("=====step2: lossyColumns设置为double========")
+ lossy_double = subprocess.check_output(lossy_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为double情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_double};the lossyColumns line is: {lossy_args}")
+
+ # 修改有损压缩,参数 float&&double ,并启动服务
+ tdLog.printNoPrefix("=====step3: lossyColumns设置为 float&&double ========")
+ lossy_float_double = subprocess.check_output(lossy_float_double_cmd, shell=True).decode("utf-8")
+ tdDnodes.start(index)
+ self.td5168insert()
+
+ # 查询前面所述5个时间数据并与基准数值进行比较
+ for i in range(5):
+ tdSql.query(f"select * from db.t1 where ts='2021-07-01 08:00:0{i}.000' ")
+ for j in range(4):
+ tdSql.checkData(0, j+1, locals()["f" + str(j) + str(i)])
+
+ # 关闭服务并获取压缩参数为 float&&double 情况下的数据容量
+ tdDnodes.stop(index)
+ dsize_float_double = int(subprocess.check_output(data_size_cmd, shell=True).decode("utf-8"))
+ lossy_args = subprocess.check_output(lossy_cfg_cmd, shell=True).decode("utf-8")
+ tdLog.printNoPrefix(f"open the lossyColumns, data size is:{dsize_float_double};the lossyColumns line is: {lossy_args}")
+
+ if not ((dsize_float_double < dsize_init) and (dsize_double < dsize_init) and (dsize_float < dsize_init)) :
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.exit("压缩未生效")
+ else:
+ tdLog.printNoPrefix(f"When lossyColumns value is float, data size is: {dsize_float}")
+ tdLog.printNoPrefix(f"When lossyColumns value is double, data size is: {dsize_double}")
+ tdLog.printNoPrefix(f"When lossyColumns value is float and double, data size is: {dsize_float_double}")
+ tdLog.printNoPrefix(f"When lossyColumns is closed, data size is: {dsize_init}")
+ tdLog.printNoPrefix("压缩生效")
+
+ pass
+
+ def td5433(self):
+ tdLog.printNoPrefix("==========TD-5433==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int) tags(t0 tinyint, t1 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c1 int) tags(t0 binary(16), t1 binary(16))")
+ numtab=20000
+ for i in range(numtab):
+ sql = f"create table db.t{i} using db.stb1 tags({i%128}, {100+i})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i*2})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i*3})")
+
+ tdSql.execute("create table db.t01 using db.stb2 tags('1', '100')")
+ tdSql.execute("create table db.t02 using db.stb2 tags('2', '200')")
+ tdSql.execute("create table db.t03 using db.stb2 tags('3', '300')")
+ tdSql.execute("create table db.t04 using db.stb2 tags('4', '400')")
+ tdSql.execute("create table db.t05 using db.stb2 tags('5', '500')")
+
+ tdSql.query("select distinct t1 from stb1 where t1 != '150'")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 != 150")
+ tdSql.checkRows(numtab-1)
+ tdSql.query("select distinct t1 from stb1 where t1 = 150")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1 where t1 = '150'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(numtab)
+
+ tdSql.query("select distinct t0 from stb1 where t0 != '2'")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 != 2")
+ tdSql.checkRows(127)
+ tdSql.query("select distinct t0 from stb1 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb1")
+ tdSql.checkRows(128)
+
+ tdSql.query("select distinct t1 from stb2 where t1 != '200'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 != 200")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t1 from stb2 where t1 = 200")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2 where t1 = '200'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t1 from stb2")
+ tdSql.checkRows(5)
+
+ tdSql.query("select distinct t0 from stb2 where t0 != '2'")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 != 2")
+ tdSql.checkRows(4)
+ tdSql.query("select distinct t0 from stb2 where t0 = 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2 where t0 = '2'")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0 from stb2")
+ tdSql.checkRows(5)
+
+ pass
+
+ def td5798(self):
+ tdLog.printNoPrefix("==========TD-5798 + TD-5810==========")
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db keep 3650")
+
+ tdSql.execute("use db")
+ tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 int) tags(t0 tinyint, t1 int, t2 int)")
+ tdSql.execute("create stable db.stb2 (ts timestamp, c2 int, c3 binary(16)) tags(t2 binary(16), t3 binary(16), t4 int)")
+ maxRemainderNum=7
+ tbnum=101
+ for i in range(tbnum-1):
+ sql = f"create table db.t{i} using db.stb1 tags({i%maxRemainderNum}, {(i-1)%maxRemainderNum}, {i%2})"
+ tdSql.execute(sql)
+ tdSql.execute(f"insert into db.t{i} values (now-10d, {i}, {i%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-9d, {i}, {(i-1)%3})")
+ tdSql.execute(f"insert into db.t{i} values (now-8d, {i}, {(i-2)%3})")
+ tdSql.execute(f"insert into db.t{i} (ts )values (now-7d)")
+
+ tdSql.execute(f"create table db.t0{i} using db.stb2 tags('{i%maxRemainderNum}', '{(i-1)%maxRemainderNum}', {i%3})")
+ tdSql.execute(f"insert into db.t0{i} values (now-10d, {i}, '{(i+1)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-9d, {i}, '{(i+2)%3}')")
+ tdSql.execute(f"insert into db.t0{i} values (now-8d, {i}, '{(i)%3}')")
+ tdSql.execute(f"insert into db.t0{i} (ts )values (now-7d)")
+ tdSql.execute("create table db.t100num using db.stb1 tags(null, null, null)")
+ tdSql.execute("create table db.t0100num using db.stb2 tags(null, null, null)")
+ tdSql.execute(f"insert into db.t100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t100num (ts )values (now-7d)")
+ tdSql.execute(f"insert into db.t0100num values (now-10d, {tbnum-1}, 1)")
+ tdSql.execute(f"insert into db.t0100num values (now-9d, {tbnum-1}, 0)")
+ tdSql.execute(f"insert into db.t0100num values (now-8d, {tbnum-1}, 2)")
+ tdSql.execute(f"insert into db.t0100num (ts )values (now-7d)")
+
+ #========== TD-5810 support distinct multi-data-column ==========
+ tdSql.query(f"select distinct c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c2 from stb1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1,c1 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from stb1 where c1 <{tbnum} limit 3 offset {tbnum*3-2}")
+ tdSql.checkRows(2)
+
+ tdSql.query(f"select distinct c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c2 from t1")
+ tdSql.checkRows(4)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c1 from t1 ")
+ tdSql.checkRows(2)
+ tdSql.query(f"select distinct c1,c1 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(1)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from t1 where c1 <{tbnum} limit 3 offset 2")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select distinct c3 from stb2 where c2 <{tbnum} ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c3, c2 from stb2 where c2 <{tbnum} limit 2")
+ tdSql.checkRows(2)
+
+ tdSql.error("select distinct c5 from stb1")
+ tdSql.error("select distinct c5 from t1")
+ tdSql.error("select distinct c1 from db.*")
+ tdSql.error("select c2, distinct c1 from stb1")
+ tdSql.error("select c2, distinct c1 from t1")
+ tdSql.error("select distinct c2 from ")
+ tdSql.error("distinct c2 from stb1")
+ tdSql.error("distinct c2 from t1")
+ tdSql.error("select distinct c1, c2, c3 from stb1")
+ tdSql.error("select distinct c1, c2, c3 from t1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from stb1")
+ tdSql.error("select distinct stb1.c1, stb1.c2, stb2.c2, stb2.c3 from t1")
+ tdSql.error("select distinct t1.c1, t1.c2, t2.c1, t2.c2 from t1")
+ tdSql.query(f"select distinct c1 c2, c2 c3 from stb1 where c1 <{tbnum}")
+ tdSql.checkRows(tbnum*3)
+ tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from stb1 order by ts")
+ tdSql.error("select distinct c1, c2 from t1 order by ts")
+ tdSql.error("select distinct c1, ts from stb1 group by c2")
+ tdSql.error("select distinct c1, ts from t1 group by c2")
+ tdSql.error("select distinct c1, max(c2) from stb1 ")
+ tdSql.error("select distinct c1, max(c2) from t1 ")
+ tdSql.error("select max(c2), distinct c1 from stb1 ")
+ tdSql.error("select max(c2), distinct c1 from t1 ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 group by t0")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) ")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from t1 where ts > now-10d and ts < now interval(1d) fill(next)")
+ tdSql.error("select distinct c1, c2 from stb1 where c1 > 3 slimit 1")
+ tdSql.error("select distinct c1, c2 from t1 where c1 > 3 slimit 1")
+ tdSql.query(f"select distinct c1, c2 from stb1 where c1 between {tbnum-2} and {tbnum} ")
+ tdSql.checkRows(6)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (1,2,3,4,5)")
+ tdSql.checkRows(15)
+ tdSql.query("select distinct c1, c2 from stb1 where c1 in (100,1000,10000)")
+ tdSql.checkRows(3)
+
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where c1 > {tbnum-2}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from t1 where c1 < {tbnum}) ")
+ tdSql.checkRows(3)
+ tdSql.query(f"select distinct c1,c2 from (select * from stb1 where t2 !=0 and t2 != 1) ")
+ tdSql.checkRows(0)
+ tdSql.error("select distinct c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select c1, c2 from (select distinct c1, c2 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.query("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 ) where c1 < 4")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct c1, c2 from (select c1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 order by ts)")
+ # tdSql.error("select distinct c1, c2 from (select c2, c1 from t1 where c1 > 2 order by ts)")
+ tdSql.error("select distinct c1, c2 from (select c2, c1 from stb1 where c1 > 2 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 group by c1)")
+ # tdSql.error("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 group by c1)")
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from stb1 )")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct c1, c2 from (select max(c1) c1, max(c2) c2 from t1 )")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.c1, stb1.c2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.c1, t1.c2 from t1 , t2 where t1.ts=t2.ts ")
+
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from stb1 group by ts)")
+ # tdSql.error("select distinct c1, c2 from (select count(c1) c1, count(c2) c2 from t1 group by ts)")
+
+
+
+ #========== TD-5798 support distinct multi-tags-column ==========
+ tdSql.query("select distinct t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0, t1, t2 from stb1")
+ tdSql.checkRows(maxRemainderNum*2+1)
+ tdSql.query("select distinct t0 t1, t1 t2 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t0, t0 from stb1")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t0, t1 from t1")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from t100num")
+ tdSql.checkRows(1)
+
+ tdSql.query("select distinct t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t4, t2 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2, t3, t4 from stb2")
+ tdSql.checkRows(maxRemainderNum*3+1)
+ tdSql.query("select distinct t2 t1, t3 t2 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t3, t3, t3 from stb2")
+ tdSql.checkRows(maxRemainderNum+1)
+ tdSql.query("select distinct t2, t3 from t01")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t3, t4 from t0100num")
+ tdSql.checkRows(1)
+
+
+ ########## should be error #########
+ tdSql.error("select distinct from stb1")
+ tdSql.error("select distinct t3 from stb1")
+ tdSql.error("select distinct t1 from db.*")
+ tdSql.error("select distinct t2 from ")
+ tdSql.error("distinct t2 from stb1")
+ tdSql.error("select distinct stb1")
+ tdSql.error("select distinct t0, t1, t2, t3 from stb1")
+ tdSql.error("select distinct stb1.t0, stb1.t1, stb2.t2, stb2.t3 from stb1")
+
+ tdSql.error("select dist t0 from stb1")
+ tdSql.error("select distinct stb2.t2, stb2.t3 from stb1")
+ tdSql.error("select distinct stb2.t2 t1, stb2.t3 t2 from stb1")
+
+ tdSql.error("select distinct t0, t1 from t1 where t0 < 7")
+
+ ########## add where condition ##########
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3")
+ tdSql.checkRows(3)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2")
+ tdSql.checkRows(2)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 limit 2 offset 2")
+ tdSql.checkRows(1)
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 slimit 2")
+ tdSql.checkRows(3)
+ tdSql.error("select distinct t0, t1 from stb1 where c1 > 2")
+ tdSql.query("select distinct t0, t1 from stb1 where t1 > 3 and t1 < 5")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct stb1.t0, stb1.t1 from stb1, stb2 where stb1.t2=stb2.t4")
+ tdSql.error("select distinct t0, t1 from stb1 where stb2.t4 > 2")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 group by t0")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) ")
+ tdSql.error("select distinct t0, t1 from stb1 where t1 > 3 interval(1d) fill(next)")
+ tdSql.error("select distinct t0, t1 from stb1 where ts > now-10d and ts < now interval(1d) fill(next)")
+
+ tdSql.error("select max(c1), distinct t0 from stb1 where t0 > 2")
+ tdSql.error("select distinct t0, max(c1) from stb1 where t0 > 2")
+ tdSql.error("select distinct t0 from stb1 where t0 in (select t0 from stb1 where t0 > 2)")
+ tdSql.query("select distinct t0, t1 from stb1 where t0 in (1,2,3,4,5)")
+ tdSql.checkRows(5)
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2) ")
+ tdSql.checkRows(4)
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 and t1 < 3) ")
+ tdSql.error("select distinct t1 from (select distinct t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.query("select distinct t1 from (select t0, t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error("select distinct t1, t0 from (select t1 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1 group by t1)")
+ tdSql.error("select distinct t1, t0 from (select max(t1) t1, max(t0) t0 from stb1)")
+ tdSql.query("select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.checkRows(1)
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 order by ts) where t1 < 3")
+ tdSql.error("select t1, t0 from (select distinct t1,t0 from stb1 where t0 > 2 ) where t1 < 3")
+ tdSql.error(" select distinct t1, t0 from (select t1,t0 from stb1 where t0 > 2 group by ts) where t1 < 3")
+ tdSql.error("select distinct stb1.t1, stb1.t2 from stb1 , stb2 where stb1.ts=stb2.ts and stb1.t2=stb2.t4")
+ tdSql.error("select distinct t1.t1, t1.t2 from t1 , t2 where t1.ts=t2.ts ")
+
+ pass
+
+    def td5935(self):
+        # TD-5935: regression cases around interval queries (last/stddev) and
+        # fill(next) behavior when the first row of a window is NULL.
+        tdLog.printNoPrefix("==========TD-5935==========")
+        # start from a clean database so the row counts below are deterministic
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+
+        tdSql.execute("use db")
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 int)")
+        # current epoch time in milliseconds; inserted timestamps count back from it
+        nowtime=int(round((time.time()*1000)))
+        for i in range(100):
+            sql = f"create table db.t{i} using db.stb1 tags({i % 7}, {i % 2})"
+            tdSql.execute(sql)
+            for j in range(1000):
+                tdSql.execute(f"insert into db.t{i} values ({nowtime-j*10}, {1000-j}, {round(random.random()*j,3)})")
+            # one timestamp-only row, so c1/c2 are NULL at the range start
+            tdSql.execute(f"insert into db.t{i} (ts) values ({nowtime-10000}) ")
+
+        ########### TD-5933 verify the bug of "function stddev with interval return 0 rows" is fixed ##########
+        stddevAndIntervalSql=f"select last(*) from t0 where ts>={nowtime-10000} interval(10a) limit 10"
+        tdSql.query(stddevAndIntervalSql)
+        tdSql.checkRows(10)
+
+        ########## TD-5978 verify the bug of "when start row is null, result by fill(next) is 0 " is fixed ##########
+        fillsql=f"select last(*) from t0 where ts>={nowtime-10000} and ts<{nowtime} interval(10a) fill(next) limit 10"
+        tdSql.query(fillsql)
+        fillResult=False
+        # fill(next) must carry the next non-NULL value into the first window,
+        # so the data column must be neither 0 nor None once the fix is in place
+        if (tdSql.getData(0,2) != 0) and (tdSql.getData(0, 2) is not None):
+            fillResult=True
+        if fillResult:
+            tdLog.success(f"sql is :{fillsql}, fill(next) is correct")
+        else:
+            tdLog.exit("fill(next) is wrong")
+
+        pass
+
+    def td6068(self):
+        # TD-6068: verify "ts as <alias>" combined with derivative/diff/bottom/top
+        # on a normal table and on a super table, then repeat the whole battery
+        # after a dnode restart to confirm results survive a WAL replay.
+        tdLog.printNoPrefix("==========TD-6068==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute("create stable db.stb1 (ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool) tags(t1 int)")
+
+        for i in range(100):
+            sql = f"create table db.t{i} using db.stb1 tags({i})"
+            tdSql.execute(sql)
+            # six data rows per table (now-10h .. now-5h) ...
+            tdSql.execute(f"insert into db.t{i} values (now-10h, {i}, {i+random.random()}, now-10h, 'a_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-9h, {i+random.randint(1,10)}, {i+random.random()}, now-9h, 'a_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} values (now-8h, {i+random.randint(1,10)}, {i+random.random()}, now-8h, 'b_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-7h, {i+random.randint(1,10)}, {i+random.random()}, now-7h, 'b_{i}', '{i-random.random()}', FALSE )")
+            tdSql.execute(f"insert into db.t{i} values (now-6h, {i+random.randint(1,10)}, {i+random.random()}, now-6h, 'c_{i}', '{i-random.random()}', True)")
+            tdSql.execute(f"insert into db.t{i} values (now-5h, {i+random.randint(1,10)}, {i+random.random()}, now-5h, 'c_{i}', '{i-random.random()}', FALSE )")
+            # ... plus three timestamp-only (all-NULL) rows around them
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-4h)")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-11h)")
+            tdSql.execute(f"insert into db.t{i} (ts)values (now-450m)")
+
+        # ---- derivative: aliased ts column (col 0) must equal the function's
+        # own timestamp column (col 1) on every result row ----
+        tdSql.query("select ts as t,derivative(c1, 10m, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, derivative(c1, 1h, 0) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, derivative(c1, 1s, 0) from t1")
+        tdSql.query("select ts as t, derivative(c1, 1d, 0) from t1")
+        tdSql.error("select ts as t, derivative(c1, 1h, 0) from stb1")
+        tdSql.query("select ts as t, derivative(c2, 1h, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, derivative(c3, 1h, 0) from t1")
+        tdSql.error("select ts as t, derivative(c4, 1h, 0) from t1")
+        tdSql.query("select ts as t, derivative(c5, 1h, 0) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, derivative(c6, 1h, 0) from t1")
+        tdSql.error("select ts as t, derivative(t1, 1h, 0) from t1")
+
+        # ---- diff: same alias check, plus type/arg error cases ----
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.error("select ts as t, diff(c1) from stb1")
+        tdSql.query("select ts as t, diff(c2) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c3) from t1")
+        tdSql.error("select ts as t, diff(c4) from t1")
+        tdSql.query("select ts as t, diff(c5) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c6) from t1")
+        tdSql.error("select ts as t, diff(t1) from t1")
+        tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+        # ---- bottom: k rows capped by the non-NULL row count (6 per table) ----
+        tdSql.error("select ts as t, bottom(c1, 0) from t1")
+        tdSql.query("select ts as t, bottom(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, bottom(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, bottom(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c3, 5) from t1")
+        tdSql.error("select ts as t, bottom(c4, 5) from t1")
+        tdSql.query("select ts as t, bottom(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c6, 5) from t1")
+        tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, bottom(t1, 1) from t1")
+        tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+        tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+        # ---- top: mirror of the bottom battery ----
+        tdSql.error("select ts as t, top(c1, 0) from t1")
+        tdSql.query("select ts as t, top(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, top(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, top(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, top(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c3, 5) from t1")
+        tdSql.error("select ts as t, top(c4, 5) from t1")
+        tdSql.query("select ts as t, top(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c6, 5) from t1")
+        tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, top(t1, 1) from t1")
+        tdSql.error("select ts as t, top(t1, 1) from stb1")
+        tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+
+        # restart the dnode, then re-run the diff/bottom/top batteries to verify
+        # the same results come back after recovery from WAL
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.checkCols(4)
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.query("select ts as t, diff(c1) from t1")
+        tdSql.error("select ts as t, diff(c1) from stb1")
+        tdSql.query("select ts as t, diff(c2) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c3) from t1")
+        tdSql.error("select ts as t, diff(c4) from t1")
+        tdSql.query("select ts as t, diff(c5) from t1")
+        tdSql.checkRows(5)
+        tdSql.error("select ts as t, diff(c6) from t1")
+        tdSql.error("select ts as t, diff(t1) from t1")
+        tdSql.error("select ts as t, diff(c1, c2) from t1")
+
+        tdSql.error("select ts as t, bottom(c1, 0) from t1")
+        tdSql.query("select ts as t, bottom(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, bottom(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, bottom(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c3, 5) from t1")
+        tdSql.error("select ts as t, bottom(c4, 5) from t1")
+        tdSql.query("select ts as t, bottom(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, bottom(c6, 5) from t1")
+        tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, bottom(t1, 1) from t1")
+        tdSql.error("select ts as t, bottom(t1, 1) from stb1")
+        tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
+
+
+        tdSql.error("select ts as t, top(c1, 0) from t1")
+        tdSql.query("select ts as t, top(c1, 5) from t1")
+        tdSql.checkRows(5)
+        tdSql.checkCols(3)
+        for i in range(5):
+            data=tdSql.getData(i, 0)
+            tdSql.checkData(i, 1, data)
+        tdSql.query("select ts as t, top(c1, 5) from stb1")
+        tdSql.checkRows(5)
+        tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
+        tdSql.checkRows(500)
+        tdSql.query("select ts as t, top(c1, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.query("select ts as t, top(c2, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c3, 5) from t1")
+        tdSql.error("select ts as t, top(c4, 5) from t1")
+        tdSql.query("select ts as t, top(c5, 8) from t1")
+        tdSql.checkRows(6)
+        tdSql.error("select ts as t, top(c6, 5) from t1")
+        tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
+        tdSql.error("select ts as t, top(t1, 1) from t1")
+        tdSql.error("select ts as t, top(t1, 1) from stb1")
+        tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
+        tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
+        pass
+
+    def apercentile_query_form(self, col="c1", p=0, com=',', algo="'t-digest'", alias="", table_expr="t1", condition=""):
+
+        '''
+        Build an apercentile query statement from its parts.
+        apercentile function:
+        :param col: string, column name, required parameters;
+        :param p: float, percentile interval, [0,100], required parameters;
+        :param com: string, separator placed between p and algo (',' normally, '' to omit algo);
+        :param algo: string, algorithm, real form like: ', algorithm' , algorithm: {type:int, data:[0, 1]};
+        :param alias: string, result column another name;
+        :param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
+        :param condition: expression;
+        :param args: other functions,like: ', last(col)'
+        :return: apercentile query statement,default: select apercentile(c1, 0, 't-digest') from t1
+        '''
+
+        return f"select apercentile({col}, {p}{com} {algo}) {alias} from {table_expr} {condition}"
+
+    def checkapert(self,col="c1", p=0, com=',', algo='"t-digest"', alias="", table_expr="t1", condition="" ):
+        # Cross-check apercentile() against reference percentiles for a set of
+        # p values. Tolerance is scaled by the column spread: results large
+        # relative to the spread use checkDeviaRation (10% relative deviation);
+        # small results compare the absolute error against 2% of the spread.
+
+        tdSql.query(f"select count({col}) from {table_expr} {condition}")
+        if tdSql.queryRows == 0:
+            # empty data source: apercentile must also return an empty result set
+            tdSql.query(self.apercentile_query_form(
+                col=col, p=p, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+            ))
+            tdSql.checkRows(0)
+            return
+
+        # percentiles always exercised, plus the caller-requested one
+        pset = [0, 40, 60, 100]
+        if p not in pset:
+            pset.append(p)
+
+        # spread of the column, used as the scale for the deviation tolerance
+        if "stb" in table_expr:
+            tdSql.query(f"select spread({col}) from stb1")
+        else:
+            tdSql.query(f"select avg(c1) from (select spread({col.split('.')[-1]}) c1 from stb1 group by tbname)")
+        spread_num = tdSql.getData(0, 0)
+
+        for pi in pset:
+
+            if "group" in condition:
+                # grouped query: compare apercentile per group against
+                # percentile() run on each underlying child table
+                tdSql.query(f"select last_row({col}) from {table_expr} {condition}")
+                query_result = tdSql.queryResult
+                query_rows = tdSql.queryRows
+                for i in range(query_rows):
+                    # rebuild a per-table condition: slimit becomes limit, the
+                    # group-by is dropped, and anything after soffset is cut
+                    pre_condition = condition.replace("slimit",'limit').replace("group by tbname", "").split("soffset")[0]
+                    tbname = query_result[i][-1]
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {tbname} {pre_condition}")
+                    print(tdSql.sql)
+                    pre_data = tdSql.getData(0, 0)
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                    if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+                        tdSql.checkDeviaRation(i, 0, pre_data, 0.1)
+                    else:
+                        # value too small for a relative check: judge the
+                        # absolute error against 2% of the spread instead
+                        devia = abs((tdSql.getData(i, 0) - pre_data) / (spread_num * 0.02))
+                        if devia < 0.5:
+                            tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                       f"actual deviation:{devia} <= expect deviation: 0.01")
+                        else:
+                            tdLog.exit(
+                                f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                                f"result data:{tdSql.getData(i, 0)}, expect data:{pre_data}, "
+                                f"actual deviation:{devia} > expect deviation: 0.01")
+
+                # if "group" in condition:
+                #     tdSql.query(self.apercentile_query_form(
+                #         col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+                #     ))
+                #     query_result = tdSql.queryResult
+                #     query_rows = tdSql.queryRows
+                #     tdSql.query(self.apercentile_query_form(
+                #         col=col, p=pi, com=com, algo='"t-digest"', alias=alias, table_expr=table_expr, condition=condition
+                #     ))
+                #     for i in range(query_rows):
+                #         if abs(tdSql.getData(i, 0)) >= (spread_num*0.02):
+                #             tdSql.checkDeviaRation(i, 0, query_result[i][0], 0.1)
+                #         else:
+                #             devia = abs((tdSql.getData(i, 0) - query_result[i][0]) / (spread_num * 0.02))
+                #             if devia < 0.5:
+                #                 tdLog.info(f"sql:{tdSql.sql}, result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+                #                            f"actual deviation:{devia} <= expect deviation: 0.01")
+                #             else:
+                #                 tdLog.exit(
+                #                     f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                #                     f"result data:{tdSql.getData(i, 0)}, expect data:{tdSql.queryResult[i][0]}, "
+                #                     f"actual deviation:{devia} > expect deviation: 0.01")
+
+            else:
+                # ungrouped query: compare against numpy.percentile over the raw
+                # column values (or a reference query when aliases are mixed in)
+                if ',' in alias or not alias:
+                    tdSql.query(f"select {col} from {table_expr} {condition}")
+                elif "stb" not in table_expr:
+                    tdSql.query(f"select percentile({col}, {pi}) {alias} from {table_expr} {condition}")
+                else:
+                    tdSql.query(self.apercentile_query_form(
+                        col=col, p=pi, com=com, algo='"default"', alias=alias, table_expr=table_expr, condition=condition
+                    ))
+                # drop NULLs before computing the reference percentile
+                query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+                tdSql.query(self.apercentile_query_form(
+                    col=col, p=pi, com=com, algo=algo, alias=alias, table_expr=table_expr, condition=condition
+                ))
+
+                if abs(tdSql.getData(0, 0)) >= (spread_num * 0.02):
+                    tdSql.checkDeviaRation(0, 0, np.percentile(query_result, pi), 0.1)
+                else:
+                    devia = abs((tdSql.getData(0, 0) - np.percentile(query_result, pi)) / (spread_num * 0.02))
+                    if devia < 0.5:
+                        tdLog.info(
+                            f"sql:{tdSql.sql}, result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} <= expect deviation: 0.01")
+                    else:
+                        tdLog.exit(
+                            f"[{inspect.getframeinfo(inspect.stack()[1][0]).lineno}],check failed:sql:{tdSql.sql}, "
+                            f"result data:{tdSql.getData(0, 0)}, expect data:{np.percentile(query_result, pi)}, "
+                            f"actual deviation:{devia} > expect deviation: 0.01")
+
+
+    def apercentile_query(self):
+        # Positive-path coverage for apercentile(): every supported column
+        # type, nested queries, algorithm spellings, and mixing with other
+        # functions/clauses. Each case delegates verification to checkapert().
+
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+
+        # case1: int col
+        self.checkapert()
+        # case2: float col
+        case2 = {'col':'c2'}
+        self.checkapert(**case2)
+        # case3: double col
+        case3 = {'col':'c5'}
+        self.checkapert(**case3)
+        # case4: bigint col
+        case4 = {'col':'c7'}
+        self.checkapert(**case4)
+        # case5: smallint col
+        case5 = {'col':'c8'}
+        self.checkapert(**case5)
+        # case6: tinyint col
+        case6 = {'col':'c9'}
+        self.checkapert(**case6)
+        # case7: stable
+        case7 = {'table_expr':'stb1'}
+        self.checkapert(**case7)
+        # case8: nest query, outquery
+        case8 = {'table_expr':'(select c1 from t1)'}
+        self.checkapert(**case8)
+        # case9: nest query, inquery and out query
+        case9 = {'table_expr':'(select apercentile(c1, 0) as c1 from t1)'}
+        self.checkapert(**case9)
+
+        # case10: nest query, inquery
+        tdSql.query("select * from (select c1 from stb1)")
+        if tdSql.queryRows == 0:
+            tdSql.query("select * from (select apercentile(c1,0) c1 from stb1)")
+            tdSql.checkRows(0)
+        else:
+            # compare the inner apercentile result against numpy on the raw column
+            query_result = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None]
+            tdSql.query("select * from (select apercentile(c1, 0) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 0), 0.1)
+            tdSql.query("select * from (select apercentile(c1,100) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 100), 0.1)
+            tdSql.query("select * from (select apercentile(c1,40) c1 from stb1)")
+            tdSql.checkDeviaRation(0, 0, np.percentile(query_result, 40), 0.1)
+
+        # case11: no algorithm = algo:0
+        case11 = {'com':'', 'algo': ''}
+        self.checkapert(**case11)
+
+        # case12~14: p: bin/oct/hex
+        case12 = {'p': 0b1100100}
+        self.checkapert(**case12)
+        case13 = {'algo':'"T-DIGEST"'}
+        self.checkapert(**case13)
+        case14 = {'p':0x32, 'algo':'"DEFAULT"'}
+        self.checkapert(**case14)
+
+        # case15~21: mix with aggregate function
+        case15 = {'alias':', count(*)'}
+        self.checkapert(**case15)
+        case16 = {'alias':', avg(c1)'}
+        self.checkapert(**case16)
+        case17 = {'alias':', twa(c1)'}
+        self.checkapert(**case17)
+        case18 = {'alias':', irate(c1)'}
+        self.checkapert(**case18)
+        case19 = {'alias':', sum(c1)'}
+        self.checkapert(**case19)
+        case20 = {'alias':', stddev(c1)'}
+        self.checkapert(**case20)
+        case21 = {'alias':', leastsquares(c1, 1, 1)'}
+        self.checkapert(**case21)
+
+        # case22~27:mix with selector function
+        case22 = {'alias':', min(c1)'}
+        self.checkapert(**case22)
+        case23 = {'alias':', max(c1)'}
+        self.checkapert(**case23)
+        case24 = {'alias':', first(c1)'}
+        self.checkapert(**case24)
+        case25 = {'alias':', last(c1)'}
+        self.checkapert(**case25)
+        case26 = {'alias':', percentile(c1, 0)'}
+        self.checkapert(**case26)
+        case27 = {'alias':', apercentile(c1, 0, "t-digest")'}
+        self.checkapert(**case27)
+
+        # case28~29: mix with computing function
+        case28 = {'alias':', spread(c1)'}
+        self.checkapert(**case28)
+        # case29: mix with four operation
+        case29 = {'alias':'+ spread(c1)'}
+        self.checkapert(**case29)
+
+        # case30~36: with condition
+        case30 = {'condition':'where ts > now'}
+        self.checkapert(**case30)
+        case31 = {'condition':'where c1 between 1 and 200'}
+        self.checkapert(**case31)
+        case32 = {'condition':f'where c1 in {tuple(i for i in range(200))}'}
+        self.checkapert(**case32)
+        case33 = {'condition':'where c1>100 and c2<100'}
+        self.checkapert(**case33)
+        case34 = {'condition':'where c1 is not null'}
+        self.checkapert(**case34)
+        case35 = {'condition':'where c4 like "_inary%"'}
+        self.checkapert(**case35)
+        case36 = {'table_expr':'stb1' ,'condition':'where tbname like "t_"'}
+        self.checkapert(**case36)
+
+        # case37~38: with join
+        case37 = {'col':'t1.c1','table_expr':'t1, t2 ','condition':'where t1.ts=t2.ts'}
+        self.checkapert(**case37)
+        case38 = {'col':'stb1.c1', 'table_expr':'stb1, stb2', 'condition':'where stb1.ts=stb2.ts and stb1.st1=stb2.st2'}
+        self.checkapert(**case38)
+
+        # case39: with group by
+        case39 = {'table_expr':'stb1', 'condition':'group by tbname'}
+        self.checkapert(**case39)
+
+        # case40: with slimit
+        case40 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1'}
+        self.checkapert(**case40)
+
+        # case41: with soffset
+        case41 = {'table_expr':'stb1', 'condition':'group by tbname slimit 1 soffset 1'}
+        self.checkapert(**case41)
+
+        # case42: with order by
+        case42 = {'table_expr':'stb1' ,'condition':'order by ts'}
+        self.checkapert(**case42)
+        case43 = {'table_expr':'t1' ,'condition':'order by ts'}
+        self.checkapert(**case43)
+
+        # case44: with limit offset
+        case44 = {'table_expr':'stb1', 'condition':'group by tbname limit 1'}
+        self.checkapert(**case44)
+        case45 = {'table_expr':'stb1', 'condition':'group by tbname limit 1 offset 1'}
+        self.checkapert(**case45)
+
+        pass
+
+    def error_apercentile(self):
+        # Negative-path coverage: every statement here must be rejected by the
+        # parser or planner (tdSql.error asserts the query fails).
+
+        # unusual test
+        #
+        # table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
+        # c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
+        #
+        # form test
+        tdSql.error(self.apercentile_query_form(col="",com='',algo=''))     # no col , no algorithm
+        tdSql.error(self.apercentile_query_form(col=""))                    # no col , algorithm
+        tdSql.error(self.apercentile_query_form(p='',com='',algo=''))      # no p , no algorithm
+        tdSql.error(self.apercentile_query_form(p=''))                     # no p , algorithm
+        tdSql.error("apercentile( c1, 100) from t1")                        # no select
+        tdSql.error("select apercentile from t1")                           # no algorithm condition
+        tdSql.error("select apercentile c1,0 from t1")                      # no brackets
+        tdSql.error("select apercentile (c1,0) t1")                         # no from
+        tdSql.error(self.apercentile_query_form(col='(c1,0)',p='',com='',algo=''))  # no p , no algorithm
+        tdSql.error("select apercentile( (c1,0) ) from t1")                 # no table_expr
+        tdSql.error("select apercentile{ (c1,0) }  from t1")                # sql form error 1
+        tdSql.error("select apercentile[ (c1,0) ]  from t1")                # sql form error 2
+        tdSql.error("select [apercentile(c1,0) ]  from t1")                 # sql form error 3
+        tdSql.error("select apercentile((c1, 0), 'default') from t1")       # sql form error 5
+        tdSql.error("select apercentile(c1, (0, 'default')) from t1")       # sql form error 6
+        tdSql.error("select apercentile(c1, (0), 1) from t1")               # sql form error 7
+        tdSql.error("select apercentile([c1, 0], 'default') from t1")       # sql form error 8
+        tdSql.error("select apercentile(c1, [0, 'default']) from t1")       # sql form error 9
+        tdSql.error("select apercentile(c1, {0, 'default'}) from t1")       # sql form error 10
+        tdSql.error("select apercentile([c1, 0]) from t1")                  # sql form error 11
+        tdSql.error("select apercentile({c1, 0}) from t1")                  # sql form error 12
+        tdSql.error("select apercentile(c1) from t1")                       # args: 1
+        tdSql.error("select apercentile(c1, 0, 'default', 0) from t1")      # args: 4
+        tdSql.error("select apercentile(c1, 0, 0, 'default') from t1")      # args: 4
+        tdSql.error("select apercentile() from t1")                         # args: null 1
+        tdSql.error("select apercentile from t1")                           # args: null 2
+        tdSql.error("select apercentile( , , ) from t1")                    # args: null 3
+        tdSql.error(self.apercentile_query_form(col='', p='', algo=''))     # args: null 4
+        tdSql.error(self.apercentile_query_form(col="st1"))                 # col:tag column
+        tdSql.error(self.apercentile_query_form(col=123))                   # col:numerical
+        tdSql.error(self.apercentile_query_form(col=True))                  # col:bool
+        tdSql.error(self.apercentile_query_form(col=''))                    # col:''
+        tdSql.error(self.apercentile_query_form(col="last(c1)"))            # col:expr
+        tdSql.error(self.apercentile_query_form(col="t%"))                  # col:non-numerical
+        tdSql.error(self.apercentile_query_form(col="c3"))                  # col-type: timestamp
+        tdSql.error(self.apercentile_query_form(col="c4"))                  # col-type: binary
+        tdSql.error(self.apercentile_query_form(col="c6"))                  # col-type: bool
+        tdSql.error(self.apercentile_query_form(col="c10"))                 # col-type: nchar
+        tdSql.error(self.apercentile_query_form(p=True))                    # p:bool
+        tdSql.error(self.apercentile_query_form(p='a'))                     # p:str
+        tdSql.error(self.apercentile_query_form(p='last(*)'))               # p:expr
+        tdSql.error(self.apercentile_query_form(p="2021-08-01 00:00:00.000"))  # p:timestamp
+        tdSql.error(self.apercentile_query_form(algo='t-digest'))           # algorithm:str (unquoted)
+        tdSql.error(self.apercentile_query_form(algo='"t_digest"'))         # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest0"'))        # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest."'))        # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest%"'))        # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='"t-digest*"'))        # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo='tdigest'))            # algorithm:str
+        tdSql.error(self.apercentile_query_form(algo=2.0))                  # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=1.9999))               # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=-0.9999))              # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=-1.0))                 # algorithm:float
+        tdSql.error(self.apercentile_query_form(algo=0b1))                  # algorithm:int (binary literal)
+        tdSql.error(self.apercentile_query_form(algo=0x1))                  # algorithm:int (hex literal)
+        tdSql.error(self.apercentile_query_form(algo=0o1))                  # algorithm:int (octal literal)
+        tdSql.error(self.apercentile_query_form(algo=True))                 # algorithm:bool
+        tdSql.error(self.apercentile_query_form(algo="True"))               # algorithm:bool
+        tdSql.error(self.apercentile_query_form(algo='2021-08-01 00:00:00.000'))  # algorithm:timestamp
+        tdSql.error(self.apercentile_query_form(algo='last(c1)'))           # algorithm:expr
+
+        # boundary test
+        tdSql.error(self.apercentile_query_form(p=-1))                      # p left out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=-9223372036854775809))    # p left out of bigint
+        tdSql.error(self.apercentile_query_form(p=100.1))                   # p right out of [0, 100]
+        tdSql.error(self.apercentile_query_form(p=18446744073709551616))    # p right out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=-1))                   # algorithm left out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=-9223372036854775809)) # algorithm left out of unsigned-bigint
+        tdSql.error(self.apercentile_query_form(algo=2))                    # algorithm right out of [0, 1]
+        tdSql.error(self.apercentile_query_form(algo=18446744073709551616)) # algorithm right out of unsigned-bigint
+
+        # mix function test
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))       # mix with top function
+        tdSql.error(self.apercentile_query_form(alias=', top(c1,1)'))       # NOTE(review): duplicates the top() case above; a bottom() variant appears intended
+        tdSql.error(self.apercentile_query_form(alias=', last_row(c1)'))    # mix with last_row function
+        tdSql.error(self.apercentile_query_form(alias=', distinct c1 '))    # mix with distinct function
+        tdSql.error(self.apercentile_query_form(alias=', *'))               # mix with *
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))        # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias=', interp(c1)', condition='ts="2021-10-10 00:00:00.000"'))  # mix with interp function
+        tdSql.error(self.apercentile_query_form(alias=', derivative(c1, 10m, 0)'))  # mix with derivative function
+        tdSql.error(self.apercentile_query_form(alias=', diff(c1)'))        # mix with diff function
+        tdSql.error(self.apercentile_query_form(alias='+ c1)'))             # mix with four operation
+
+    def apercentile_data(self, tbnum, data_row, basetime):
+        # Populate tbnum child tables with data_row pairs of rows around
+        # basetime: negative-valued rows at/after basetime, positive-valued
+        # rows before it, plus one row per step into the stb2 tables (tt{i}).
+        for i in range(tbnum):
+            for j in range(data_row):
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime + j*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
+                    f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
+                    f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
+                )
+
+                tdSql.execute(
+                    f"insert into t{i} values ("
+                    f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
+                    f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
+                    f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
+                )
+                tdSql.execute(
+                    f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+                )
+
+        pass
+
<![CDATA[+    def td6108(self):
+        # TD-6108: apercentile() end-to-end — runs the positive and negative
+        # query suites on empty tables, on populated tables, with NULL rows,
+        # and again after a dnode restart (WAL recovery).
+        tdLog.printNoPrefix("==========TD-6108==========")
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database if not exists db keep 3650")
+        tdSql.execute("use db")
+
+        tdSql.execute(
+            "create stable db.stb1 (\
+            ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
+            c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
+            ) \
+            tags(st1 int)"
+        )
+        tdSql.execute(
+            "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+        )
+        tbnum = 10
+        for i in range(tbnum):
+            tdSql.execute(f"create table t{i} using stb1 tags({i})")
+            tdSql.execute(f"create table tt{i} using stb2 tags({i})")
+
+        tdLog.printNoPrefix("######## no data test:")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data test:")
+        nowtime = int(round(time.time() * 1000))
+        per_table_rows = 1000
+        self.apercentile_data(tbnum, per_table_rows, nowtime)
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## insert data with NULL test:")
+        # two timestamp-only rows straddling nowtime exercise NULL filtering
+        tdSql.execute(f"insert into t1(ts) values ({nowtime-5})")
+        tdSql.execute(f"insert into t1(ts) values ({nowtime+5})")
+        self.apercentile_query()
+        self.error_apercentile()
+
+        tdLog.printNoPrefix("######## check after WAL test:")
+        # restart the first dnode so the suites re-run against replayed data
+        tdSql.query("show dnodes")
+        index = tdSql.getData(0, 0)
+        tdDnodes.stop(index)
+        tdDnodes.start(index)
+
+        self.apercentile_query()
+        self.error_apercentile()]]>
+
+
+    def run(self):
+        # Entry point invoked by the test framework; only the cases relevant
+        # to the current branch are enabled, the rest stay commented out.
+
+        # master branch
+        self.td6068()
+
+        # self.td5168()
+        # self.td5433()
+        # self.td5798()
+
+        # develop branch
+        # self.td4889() In the scenario that with vnode/wal/wal* but without meta/data in vnode, the status is reset to 0 right now.
+
+    def stop(self):
+        # close the SQL connection and report overall success to the framework
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
+
+
+
diff --git a/tests/pytest/functions/queryTestCases.py b/tests/pytest/functions/queryTestCases.py
index e5213d9f7253d1a82e84584c87b66291daf01f25..75e2359bb1c6b03e27e60ea75dbaeb6e77f2cc13 100644
--- a/tests/pytest/functions/queryTestCases.py
+++ b/tests/pytest/functions/queryTestCases.py
@@ -1581,7 +1581,7 @@ class TDTestCase:
# self.td5798()
# develop branch
- self.td4889()
+        # self.td4889()  # disabled: when vnode/wal/wal* files exist but the vnode has no meta/data, the status is reset to 0 for now
self.td5798()
def stop(self):
diff --git a/tests/pytest/insert/boundary2.py b/tests/pytest/insert/boundary2.py
index 72d00228a52991bef5599aee0c499c6406588d23..12b82be8034112b958fca52bdf08d34f9286dd5c 100644
--- a/tests/pytest/insert/boundary2.py
+++ b/tests/pytest/insert/boundary2.py
@@ -61,6 +61,12 @@ class TDTestCase:
tdSql.query("select count(*) from stb")
tdSql.checkData(0, 0, 4096)
+ sql = "create table stb(ts timestamp, "
+ for i in range(15):
+ sql += "col%d binary(1022), " % (i + 1)
+ sql += "col1023 binary(1015))"
+ tdSql.error(sql)
+
endTime = time.time()
sql = "create table stb(ts timestamp, "
diff --git a/tests/pytest/insert/insertJSONPayload.py b/tests/pytest/insert/insertJSONPayload.py
index 41d60cd1520e09b94c90083f8b6a361df4556444..81d4b47ef15cb03311943d3d53c2efe25a3b0312 100644
--- a/tests/pytest/insert/insertJSONPayload.py
+++ b/tests/pytest/insert/insertJSONPayload.py
@@ -36,7 +36,7 @@ class TDTestCase:
print("============= step0 : test metric ================")
payload = ['''
{
- "metric": "`.stb.0.`",
+ "metric": ".stb.0.",
"timestamp": 1626006833610,
"value": 10,
"tags": {
@@ -664,6 +664,183 @@ class TDTestCase:
tdSql.checkData(9, 1, "BINARY")
tdSql.checkData(10, 1, "NCHAR")
+ ### special characters ###
+
+ payload = ['''
+ {
+ "metric": "1234",
+ "timestamp": 1626006833,
+ "value": 1,
+ "tags": {
+ "id": "123",
+ "456": true,
+ "int": false,
+ "double": 1,
+ "into": 1,
+ "from": 2,
+ "!@#$.%^&*()": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
+ }
+ }
+ ''']
+ code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query("describe `1234`")
+ tdSql.checkRows(8)
+
+ tdSql.query("select * from `123`")
+ tdSql.checkRows(1)
+
+ payload = ['''
+ {
+ "metric": "int",
+ "timestamp": 1626006833,
+ "value": 1,
+ "tags": {
+ "id": "and",
+ "456": true,
+ "int": false,
+ "double": 1,
+ "into": 1,
+ "from": 2,
+ "!@#$.%^&*()": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
+ }
+ }
+ ''']
+ code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query("describe `int`")
+ tdSql.checkRows(8)
+
+ tdSql.query("select * from `and`")
+ tdSql.checkRows(1)
+
+ payload = ['''
+ {
+ "metric": "double",
+ "timestamp": 1626006833,
+ "value": 1,
+ "tags": {
+ "id": "for",
+ "456": true,
+ "int": false,
+ "double": 1,
+ "into": 1,
+ "from": 2,
+ "!@#$.%^&*()": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
+ }
+ }
+ ''']
+ code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query("describe `double`")
+ tdSql.checkRows(8)
+
+ tdSql.query("select * from `for`")
+ tdSql.checkRows(1)
+
+ payload = ['''
+ {
+ "metric": "from",
+ "timestamp": 1626006833,
+ "value": 1,
+ "tags": {
+ "id": "!@#.^&",
+ "456": true,
+ "int": false,
+ "double": 1,
+ "into": 1,
+ "from": 2,
+ "!@#$.%^&*()": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
+ }
+ }
+ ''']
+ code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query("describe `from`")
+ tdSql.checkRows(8)
+
+ tdSql.query("select * from `!@#.^&`")
+ tdSql.checkRows(1)
+
+ payload = ['''
+ {
+ "metric": "!@#$.%^&*()",
+ "timestamp": 1626006833,
+ "value": 1,
+ "tags": {
+ "id": "none",
+ "456": true,
+ "int": false,
+ "double": 1,
+ "into": 1,
+ "from": 2,
+ "!@#$.%^&*()": "123_abc_.!@#$%^&*:;,./?|+-=()[]{}<>"
+ }
+ }
+ ''']
+ code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query("describe `!@#$.%^&*()`")
+ tdSql.checkRows(8)
+
+ tdSql.query("select * from `none`")
+ tdSql.checkRows(1)
+
+ payload = ['''
+ {
+ "metric": "STABLE",
+ "timestamp": {
+ "value": 1626006833,
+ "type": "s"
+ },
+ "value": {
+ "value": "hello",
+ "type": "nchar"
+ },
+ "tags": {
+ "id": "KEY",
+ "456": {
+ "value": true,
+ "type": "bool"
+ },
+ "int": {
+ "value": 127,
+ "type": "tinyint"
+ },
+ "double":{
+ "value": 32767,
+ "type": "smallint"
+ },
+ "into": {
+ "value": 2147483647,
+ "type": "int"
+ },
+ "INSERT": {
+ "value": 9.2233720368547758e+18,
+ "type": "bigint"
+ },
+ "!@#$.%^&*()": {
+ "value": 11.12345,
+ "type": "float"
+ }
+ }
+ }
+ ''']
+
+ code = self._conn.schemaless_insert(payload, TDSmlProtocolType.JSON.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query("describe `stable`")
+ tdSql.checkRows(8)
+
+ tdSql.query("select * from `key`")
+ tdSql.checkRows(1)
+
def stop(self):
tdSql.close()
diff --git a/tests/pytest/insert/insertTelnetLines.py b/tests/pytest/insert/insertTelnetLines.py
index 0ecf93b5a459d2aac2a656543e946173f8309759..a48351f6c0b162be83f6aca44a87ff9f55b498c8 100644
--- a/tests/pytest/insert/insertTelnetLines.py
+++ b/tests/pytest/insert/insertTelnetLines.py
@@ -35,7 +35,7 @@ class TDTestCase:
"stb0_0 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_1 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_2 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
- "`.stb0.3.` 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
+ ".stb0.3. 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
]
code = self._conn.schemaless_insert(lines0, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -59,28 +59,24 @@ class TDTestCase:
### timestamp ###
print("============= step2 : test timestamp ================")
lines1 = [
- "stb1 1626006833s 1i8 host=\"host0\"",
- "stb1 1626006833639000000ns 2i8 host=\"host0\"",
- "stb1 1626006833640000us 3i8 host=\"host0\"",
- "stb1 1626006833641 4i8 host=\"host0\"",
- "stb1 1626006834 5i8 host=\"host0\"",
- "stb1 1626006833651ms 6i8 host=\"host0\"",
- "stb1 0 7i8 host=\"host0\"",
+ "stb1 1626006833641 1i8 host=\"host0\"",
+ "stb1 1626006834 2i8 host=\"host0\"",
+ "stb1 0 3i8 host=\"host0\"",
]
code = self._conn.schemaless_insert(lines1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
print("schemaless_insert result {}".format(code))
tdSql.query("select * from stb1")
- tdSql.checkRows(7)
+ tdSql.checkRows(3)
### metric value ###
print("============= step3 : test metric value ================")
#tinyint
lines2_0 = [
- "stb2_0 1626006833651ms -127i8 host=\"host0\"",
- "stb2_0 1626006833652ms 127i8 host=\"host0\""
+ "stb2_0 1626006833651 -127i8 host=\"host0\"",
+ "stb2_0 1626006833652 127i8 host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_0, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
print("schemaless_insert result {}".format(code))
@@ -94,8 +90,8 @@ class TDTestCase:
#smallint
lines2_1 = [
- "stb2_1 1626006833651ms -32767i16 host=\"host0\"",
- "stb2_1 1626006833652ms 32767i16 host=\"host0\""
+ "stb2_1 1626006833651 -32767i16 host=\"host0\"",
+ "stb2_1 1626006833652 32767i16 host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
print("schemaless_insert result {}".format(code))
@@ -109,8 +105,8 @@ class TDTestCase:
#int
lines2_2 = [
- "stb2_2 1626006833651ms -2147483647i32 host=\"host0\"",
- "stb2_2 1626006833652ms 2147483647i32 host=\"host0\""
+ "stb2_2 1626006833651 -2147483647i32 host=\"host0\"",
+ "stb2_2 1626006833652 2147483647i32 host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_2, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -125,8 +121,8 @@ class TDTestCase:
#bigint
lines2_3 = [
- "stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"",
- "stb2_3 1626006833652ms 9223372036854775807i64 host=\"host0\""
+ "stb2_3 1626006833651 -9223372036854775807i64 host=\"host0\"",
+ "stb2_3 1626006833652 9223372036854775807i64 host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_3, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -141,16 +137,16 @@ class TDTestCase:
#float
lines2_4 = [
- "stb2_4 1626006833610ms 3f32 host=\"host0\"",
- "stb2_4 1626006833620ms -3f32 host=\"host0\"",
- "stb2_4 1626006833630ms 3.4f32 host=\"host0\"",
- "stb2_4 1626006833640ms -3.4f32 host=\"host0\"",
- "stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"",
- "stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"",
- "stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"",
- "stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"",
- "stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"",
- "stb2_4 1626006833710ms -3.4E38f32 host=\"host0\""
+ "stb2_4 1626006833610 3f32 host=\"host0\"",
+ "stb2_4 1626006833620 -3f32 host=\"host0\"",
+ "stb2_4 1626006833630 3.4f32 host=\"host0\"",
+ "stb2_4 1626006833640 -3.4f32 host=\"host0\"",
+ "stb2_4 1626006833650 3.4E10f32 host=\"host0\"",
+ "stb2_4 1626006833660 -3.4e10f32 host=\"host0\"",
+ "stb2_4 1626006833670 3.4E+2f32 host=\"host0\"",
+ "stb2_4 1626006833680 -3.4e-2f32 host=\"host0\"",
+ "stb2_4 1626006833700 3.4E38f32 host=\"host0\"",
+ "stb2_4 1626006833710 -3.4E38f32 host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_4, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -165,17 +161,17 @@ class TDTestCase:
#double
lines2_5 = [
- "stb2_5 1626006833610ms 3f64 host=\"host0\"",
- "stb2_5 1626006833620ms -3f64 host=\"host0\"",
- "stb2_5 1626006833630ms 3.4f64 host=\"host0\"",
- "stb2_5 1626006833640ms -3.4f64 host=\"host0\"",
- "stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"",
- "stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"",
- "stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"",
- "stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"",
- "stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"",
- "stb2_5 1626006833700ms -1.7E308f64 host=\"host0\"",
- "stb2_5 1626006833710ms 3 host=\"host0\""
+ "stb2_5 1626006833610 3f64 host=\"host0\"",
+ "stb2_5 1626006833620 -3f64 host=\"host0\"",
+ "stb2_5 1626006833630 3.4f64 host=\"host0\"",
+ "stb2_5 1626006833640 -3.4f64 host=\"host0\"",
+ "stb2_5 1626006833650 3.4E10f64 host=\"host0\"",
+ "stb2_5 1626006833660 -3.4e10f64 host=\"host0\"",
+ "stb2_5 1626006833670 3.4E+2f64 host=\"host0\"",
+ "stb2_5 1626006833680 -3.4e-2f64 host=\"host0\"",
+ "stb2_5 1626006833690 1.7E308f64 host=\"host0\"",
+ "stb2_5 1626006833700 -1.7E308f64 host=\"host0\"",
+ "stb2_5 1626006833710 3 host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_5, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -190,16 +186,16 @@ class TDTestCase:
#bool
lines2_6 = [
- "stb2_6 1626006833610ms t host=\"host0\"",
- "stb2_6 1626006833620ms T host=\"host0\"",
- "stb2_6 1626006833630ms true host=\"host0\"",
- "stb2_6 1626006833640ms True host=\"host0\"",
- "stb2_6 1626006833650ms TRUE host=\"host0\"",
- "stb2_6 1626006833660ms f host=\"host0\"",
- "stb2_6 1626006833670ms F host=\"host0\"",
- "stb2_6 1626006833680ms false host=\"host0\"",
- "stb2_6 1626006833690ms False host=\"host0\"",
- "stb2_6 1626006833700ms FALSE host=\"host0\""
+ "stb2_6 1626006833610 t host=\"host0\"",
+ "stb2_6 1626006833620 T host=\"host0\"",
+ "stb2_6 1626006833630 true host=\"host0\"",
+ "stb2_6 1626006833640 True host=\"host0\"",
+ "stb2_6 1626006833650 TRUE host=\"host0\"",
+ "stb2_6 1626006833660 f host=\"host0\"",
+ "stb2_6 1626006833670 F host=\"host0\"",
+ "stb2_6 1626006833680 false host=\"host0\"",
+ "stb2_6 1626006833690 False host=\"host0\"",
+ "stb2_6 1626006833700 FALSE host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_6, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -214,9 +210,9 @@ class TDTestCase:
#binary
lines2_7 = [
- "stb2_7 1626006833610ms \" binary_val .!@#$%^&* \" host=\"host0\"",
- "stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"",
- "stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\""
+ "stb2_7 1626006833610 \" binary_val .!@#$%^&* \" host=\"host0\"",
+ "stb2_7 1626006833620 \"binary_val.:;,./?|+-=\" host=\"host0\"",
+ "stb2_7 1626006833630 \"binary_val.()[]{}<>\" host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_7, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -231,8 +227,8 @@ class TDTestCase:
#nchar
lines2_8 = [
- "stb2_8 1626006833610ms L\" nchar_val 数值一 \" host=\"host0\"",
- "stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\""
+ "stb2_8 1626006833610 L\" nchar_val 数值一 \" host=\"host0\"",
+ "stb2_8 1626006833620 L\"nchar_val数值二\" host=\"host0\""
]
code = self._conn.schemaless_insert(lines2_8, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -249,8 +245,8 @@ class TDTestCase:
print("============= step3 : test tags ================")
#tag value types
lines3_0 = [
- "stb3_0 1626006833610ms 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"",
- "stb3_0 1626006833610ms 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\""
+ "stb3_0 1626006833610 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"",
+ "stb3_0 1626006833610 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\""
]
code = self._conn.schemaless_insert(lines3_0, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -292,9 +288,9 @@ class TDTestCase:
#tag ID as child table name
lines3_1 = [
- "stb3_1 1626006833610ms 1 id=child_table1 host=host1",
- "stb3_1 1626006833610ms 2 host=host2 iD=child_table2",
- "stb3_1 1626006833610ms 3 ID=child_table3 host=host3"
+ "stb3_1 1626006833610 1 id=child_table1 host=host1",
+ "stb3_1 1626006833610 2 host=host2 iD=child_table2",
+ "stb3_1 1626006833610 3 ID=child_table3 host=host3"
]
code = self._conn.schemaless_insert(lines3_1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
@@ -308,6 +304,56 @@ class TDTestCase:
tdSql.checkData(0, 0, "child_table1")
+ ### special characters and keywords ###
+ print("============= step4 : test special characters and keywords ================")
+ lines4_1 = [
+ "1234 1626006833610ms 1 id=123 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
+ "int 1626006833610ms 2 id=and 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
+ "double 1626006833610ms 2 id=for 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
+ "from 1626006833610ms 2 id=!@#.^& 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
+ "!@#$.%^&*() 1626006833610ms 2 id=none 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
+ "STABLE 1626006833610ms 2 id=KEY 456=true int=true double=false TAG=1 FROM=2 COLUMN=false",
+ ]
+
+ code = self._conn.schemaless_insert(lines4_1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
+ print("schemaless_insert result {}".format(code))
+
+ tdSql.query('describe `1234`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `int`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `double`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `from`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `!@#$.%^&*()`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `stable`')
+ tdSql.checkRows(8)
+
+ tdSql.query('select * from `123`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `and`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `for`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `!@#.^&`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `none`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `key`')
+ tdSql.checkRows(1)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/insert/line_insert.py b/tests/pytest/insert/line_insert.py
index 334ccd4e6ec8c6058afe2f115fda61bda6428a14..ff26483aeb323ebd309ba7a41e91ac860af9d222 100644
--- a/tests/pytest/insert/line_insert.py
+++ b/tests/pytest/insert/line_insert.py
@@ -85,6 +85,53 @@ class TDTestCase:
tdSql.query('select tbname, * from childtable')
tdSql.checkRows(1)
+
+        ### special characters and keywords ###
+ self._conn.schemaless_insert([
+ "1234,id=3456,abc=4i64,def=3i64 123=3i64,int=2i64,bool=false,into=5f64,column=7u64,!@#$.%^&*()=false 1626006933641",
+ "int,id=and,123=4i64,smallint=5f64,double=5f64,of=3i64,key=L\"passitagin_stf\",!@#$.%^&*()=false abc=false 1626006933654",
+ "double,id=for,123=4i64,smallint=5f64,double=5f64,of=3i64,key=L\"passitagin_stf\",!@#$.%^&*()=false abc=false 1626006933664",
+ "from,id=!@#$.%^,123=4i64,smallint=5f64,double=5f64,of=3i64,key=L\"passitagin_stf\",!@#$.%^&*()=false abc=false 1626006933674",
+ "!@#$.%^&*(),id=none,123=4i64,smallint=5f64,double=5f64,of=3i64,key=L\"passitagin_stf\",!@#$.%^&*()=false abc=false 1626006933684",
+ "STABLE,id=CREATE,123=4i64,smallint=5f64,DOUBLE=5f64,of=3i64,key=L\"passitagin_stf\",!@#$.%^&*()=false SELECT=false 1626006933684",
+ ], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MILLI_SECOND.value)
+ tdSql.execute('reset query cache')
+
+ tdSql.query('describe `1234`')
+ tdSql.checkRows(9)
+
+ tdSql.query('describe `int`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `double`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `from`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `!@#$.%^&*()`')
+ tdSql.checkRows(8)
+
+ tdSql.query('describe `stable`')
+ tdSql.checkRows(8)
+
+ tdSql.query('select * from `3456`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `and`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `for`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `!@#$.%^`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `none`')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from `create`')
+ tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/insert/openTsdbJsonInsert.py b/tests/pytest/insert/openTsdbJsonInsert.py
new file mode 100644
index 0000000000000000000000000000000000000000..69099787b42947901dd5afcf567d1edd3361dc03
--- /dev/null
+++ b/tests/pytest/insert/openTsdbJsonInsert.py
@@ -0,0 +1,1775 @@
+###################################################################
+# Copyright (c) 2021 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import traceback
+import random
+from taos.error import SchemalessError
+import time
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import tdCom
+from util.types import TDSmlProtocolType
+import threading
+import json
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+ self._conn = conn
+
+ def createDb(self, name="test", db_update_tag=0):
+ if db_update_tag == 0:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'us'")
+ else:
+ tdSql.execute(f"drop database if exists {name}")
+ tdSql.execute(f"create database if not exists {name} precision 'us' update 1")
+ tdSql.execute(f'use {name}')
+
+ def timeTrans(self, ts_value):
+ if type(ts_value) is int:
+ if ts_value != 0:
+ if len(str(ts_value)) == 13:
+ ts = int(ts_value)/1000
+ elif len(str(ts_value)) == 10:
+ ts = int(ts_value)/1
+ else:
+ ts = ts_value/1000000
+ else:
+ ts = time.time()
+ elif type(ts_value) is dict:
+ if ts_value["type"].lower() == "ns":
+ ts = ts_value["value"]/1000000000
+ elif ts_value["type"].lower() == "us":
+ ts = ts_value["value"]/1000000
+ elif ts_value["type"].lower() == "ms":
+ ts = ts_value["value"]/1000
+ elif ts_value["type"].lower() == "s":
+ ts = ts_value["value"]/1
+ else:
+ ts = ts_value["value"]/1000000
+ else:
+ print("input ts maybe not right format")
+ ulsec = repr(ts).split('.')[1][:6]
+ if len(ulsec) < 6 and int(ulsec) != 0:
+ ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
+ elif int(ulsec) == 0:
+ ulsec *= 6
+        # NOTE(review): the early return below (added for tsCheckCase) truncates sub-second precision; the two strftime lines after it are unreachable
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ return td_ts
+ #td_ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
+ td_ts = time.strftime("%Y-%m-%d %H:%M:%S.{}".format(ulsec), time.localtime(ts))
+ return td_ts
+
+ def dateToTs(self, datetime_input):
+ return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
+
+ def typeTrans(self, type_list):
+ type_num_list = []
+ for tp in type_list:
+ if type(tp) is dict:
+ tp = tp['type']
+ if tp.upper() == "TIMESTAMP":
+ type_num_list.append(9)
+ elif tp.upper() == "BOOL":
+ type_num_list.append(1)
+ elif tp.upper() == "TINYINT":
+ type_num_list.append(2)
+ elif tp.upper() == "SMALLINT":
+ type_num_list.append(3)
+ elif tp.upper() == "INT":
+ type_num_list.append(4)
+ elif tp.upper() == "BIGINT":
+ type_num_list.append(5)
+ elif tp.upper() == "FLOAT":
+ type_num_list.append(6)
+ elif tp.upper() == "DOUBLE":
+ type_num_list.append(7)
+ elif tp.upper() == "BINARY":
+ type_num_list.append(8)
+ elif tp.upper() == "NCHAR":
+ type_num_list.append(10)
+ elif tp.upper() == "BIGINT UNSIGNED":
+ type_num_list.append(14)
+ return type_num_list
+
+ def inputHandle(self, input_json):
+ stb_name = input_json["metric"]
+ stb_tag_dict = input_json["tags"]
+ stb_col_dict = input_json["value"]
+ ts_value = self.timeTrans(input_json["timestamp"])
+ tag_name_list = []
+ tag_value_list = []
+ td_tag_value_list = []
+ td_tag_type_list = []
+
+ col_name_list = []
+ col_value_list = []
+ td_col_value_list = []
+ td_col_type_list = []
+
+ # handle tag
+ for key,value in stb_tag_dict.items():
+ if "id" == key.lower():
+ tb_name = value
+ else:
+ if type(value) is dict:
+ tag_value_list.append(str(value["value"]))
+ td_tag_value_list.append(str(value["value"]))
+ tag_name_list.append(key.lower())
+ td_tag_type_list.append(value["type"].upper())
+ tb_name = ""
+ else:
+ tag_value_list.append(str(value))
+ # td_tag_value_list.append(str(value))
+ tag_name_list.append(key.lower())
+ tb_name = ""
+
+ if type(value) is bool:
+ td_tag_type_list.append("BOOL")
+ td_tag_value_list.append(str(value))
+ elif type(value) is int:
+ # td_tag_type_list.append("BIGINT")
+ td_tag_type_list.append("DOUBLE")
+ td_tag_value_list.append(str(float(value)))
+ elif type(value) is float:
+ td_tag_type_list.append("DOUBLE")
+ td_tag_value_list.append(str(float(value)))
+ elif type(value) is str:
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ td_tag_type_list.append("NCHAR")
+ td_tag_value_list.append(str(value))
+ else:
+ td_tag_type_list.append("BINARY")
+ td_tag_value_list.append(str(value))
+
+ # handle col
+ if type(stb_col_dict) is dict:
+ if stb_col_dict["type"].lower() == "bool":
+ bool_value = f'{stb_col_dict["value"]}'
+ col_value_list.append(bool_value)
+ td_col_type_list.append(stb_col_dict["type"].upper())
+ col_name_list.append("value")
+ td_col_value_list.append(str(stb_col_dict["value"]))
+ else:
+ col_value_list.append(stb_col_dict["value"])
+ td_col_type_list.append(stb_col_dict["type"].upper())
+ col_name_list.append("value")
+ td_col_value_list.append(str(stb_col_dict["value"]))
+ else:
+ col_name_list.append("value")
+ col_value_list.append(str(stb_col_dict))
+ # td_col_value_list.append(str(stb_col_dict))
+ if type(stb_col_dict) is bool:
+ td_col_type_list.append("BOOL")
+ td_col_value_list.append(str(stb_col_dict))
+ elif type(stb_col_dict) is int:
+ td_col_type_list.append("DOUBLE")
+ td_col_value_list.append(str(float(stb_col_dict)))
+ elif type(stb_col_dict) is float:
+ td_col_type_list.append("DOUBLE")
+ td_col_value_list.append(str(float(stb_col_dict)))
+ elif type(stb_col_dict) is str:
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ td_col_type_list.append("NCHAR")
+ td_col_value_list.append(str(stb_col_dict))
+ else:
+ td_col_type_list.append("BINARY")
+ td_col_value_list.append(str(stb_col_dict))
+
+ final_field_list = []
+ final_field_list.extend(col_name_list)
+ final_field_list.extend(tag_name_list)
+
+ final_type_list = []
+ final_type_list.append("TIMESTAMP")
+ final_type_list.extend(td_col_type_list)
+ final_type_list.extend(td_tag_type_list)
+ final_type_list = self.typeTrans(final_type_list)
+
+ final_value_list = []
+ final_value_list.append(ts_value)
+ final_value_list.extend(td_col_value_list)
+ final_value_list.extend(td_tag_value_list)
+ return final_value_list, final_field_list, final_type_list, stb_name, tb_name
+
+ def genTsColValue(self, value, t_type=None, value_type="obj"):
+ if value_type == "obj":
+ if t_type == None:
+ ts_col_value = value
+ else:
+ ts_col_value = {"value": value, "type": t_type}
+ elif value_type == "default":
+ ts_col_value = value
+ return ts_col_value
+
+ def genTagValue(self, t0_type="bool", t0_value="", t1_type="tinyint", t1_value=127, t2_type="smallint", t2_value=32767,
+ t3_type="int", t3_value=2147483647, t4_type="bigint", t4_value=9223372036854775807,
+ t5_type="float", t5_value=11.12345027923584, t6_type="double", t6_value=22.123456789,
+ t7_type="binary", t7_value="binaryTagValue", t8_type="nchar", t8_value="ncharTagValue", value_type="obj"):
+ if t0_value == "":
+ t0_value = random.choice([True, False])
+ if value_type == "obj":
+ tag_value = {
+ "t0": {"value": t0_value, "type": t0_type},
+ "t1": {"value": t1_value, "type": t1_type},
+ "t2": {"value": t2_value, "type": t2_type},
+ "t3": {"value": t3_value, "type": t3_type},
+ "t4": {"value": t4_value, "type": t4_type},
+ "t5": {"value": t5_value, "type": t5_type},
+ "t6": {"value": t6_value, "type": t6_type},
+ "t7": {"value": t7_value, "type": t7_type},
+ "t8": {"value": t8_value, "type": t8_type}
+ }
+ elif value_type == "default":
+ # t5_value = t6_value
+ tag_value = {
+ "t0": t0_value,
+ "t1": t1_value,
+ "t2": t2_value,
+ "t3": t3_value,
+ "t4": t4_value,
+ "t5": t5_value,
+ "t6": t6_value,
+ "t7": t7_value,
+ "t8": t8_value
+ }
+ return tag_value
+
+ def genFullTypeJson(self, ts_value="", col_value="", tag_value="", stb_name="", tb_name="",
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ chinese_tag=None, multi_field_tag=None, point_trans_tag=None, value_type="obj"):
+ if value_type == "obj":
+ if stb_name == "":
+ stb_name = tdCom.getLongName(len=6, mode="letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if ts_value == "":
+ ts_value = self.genTsColValue(1626006833639000000, "ns")
+ if col_value == "":
+ col_value = self.genTsColValue(random.choice([True, False]), "bool")
+ if tag_value == "":
+ tag_value = self.genTagValue()
+ if id_upper_tag is not None:
+ id = "ID"
+ else:
+ id = "id"
+ if id_mixul_tag is not None:
+ id = random.choice(["iD", "Id"])
+ else:
+ id = "id"
+ if id_noexist_tag is None:
+ tag_value[id] = tb_name
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_noexist_tag is not None:
+ if t_add_tag is not None:
+ tag_value["t9"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_change_tag is not None:
+ tag_value.pop('t8')
+ tag_value["t8"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_double_tag is not None:
+ tag_value["ID"] = f'"{tb_name}_2"'
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_add_tag is not None:
+ tag_value["t10"] = {"value": "ncharTagValue", "type": "nchar"}
+ tag_value["t11"] = {"value": True, "type": "bool"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_mul_tag is not None:
+ tag_value.pop('t8')
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_multi_tag is not None:
+ col_value = [{"value": True, "type": "bool"}, {"value": False, "type": "bool"}]
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_blank_tag is not None:
+ tag_value = {"id": tdCom.getLongName(len=6, mode="letters")}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if chinese_tag is not None:
+ tag_value = {"id": "abc", "t0": {"value": "涛思数据", "type": "nchar"}}
+ col_value = {"value": "涛思数据", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_blank_tag is not None:
+ sql_json.pop("value")
+ if multi_field_tag is not None:
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value, "tags2": tag_value}
+ if point_trans_tag is not None:
+ sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value}
+
+ elif value_type == "default":
+ if stb_name == "":
+ stb_name = tdCom.getLongName(len=6, mode="letters")
+ if tb_name == "":
+ tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
+ if ts_value == "":
+ ts_value = 1626006834
+ if col_value == "":
+ col_value = random.choice([True, False])
+ if tag_value == "":
+ tag_value = self.genTagValue(value_type=value_type)
+ if id_upper_tag is not None:
+ id = "ID"
+ else:
+ id = "id"
+ if id_mixul_tag is not None:
+ id = "iD"
+ else:
+ id = "id"
+ if id_noexist_tag is None:
+ tag_value[id] = tb_name
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_noexist_tag is not None:
+ if t_add_tag is not None:
+ tag_value["t9"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_change_tag is not None:
+ tag_value.pop('t7')
+ tag_value["t7"] = {"value": "ncharTagValue", "type": "nchar"}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if id_double_tag is not None:
+ tag_value["ID"] = f'"{tb_name}_2"'
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_add_tag is not None:
+ tag_value["t10"] = {"value": "ncharTagValue", "type": "nchar"}
+ tag_value["t11"] = True
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_mul_tag is not None:
+ tag_value.pop('t7')
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_multi_tag is not None:
+ col_value = True,False
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if t_blank_tag is not None:
+ tag_value = {"id": tdCom.getLongName(len=6, mode="letters")}
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ if c_blank_tag is not None:
+ sql_json.pop("value")
+ if multi_field_tag is not None:
+ sql_json = {"metric": stb_name, "timestamp": ts_value, "value": col_value, "tags": tag_value, "tags2": tag_value}
+ if point_trans_tag is not None:
+ sql_json = {"metric": ".point.trans.test", "timestamp": ts_value, "value": col_value, "tags": tag_value}
+ return sql_json, stb_name
+
+ def genMulTagColDict(self, genType, count=1, value_type="obj"):
+ """
+ Build a tag dict or a single col value for a schemaless JSON payload.
+ genType must be "tag" or "col"; count is how many tags to generate.
+ value_type "obj" wraps each value as {"value": ..., "type": ...};
+ "default" uses bare Python values.
+ Returns a dict of tags (t0..t{count-1}) or a single col value;
+ implicitly returns None for any other genType/value_type combination.
+ """
+ tag_dict = dict()
+ col_dict = dict()
+ if value_type == "obj":
+ if genType == "tag":
+ for i in range(0, count):
+ # tags named t0..t{count-1}, all bool True
+ tag_dict[f't{i}'] = {'value': True, 'type': 'bool'}
+ return tag_dict
+ if genType == "col":
+ col_dict = {'value': True, 'type': 'bool'}
+ return col_dict
+ elif value_type == "default":
+ if genType == "tag":
+ for i in range(0, count):
+ tag_dict[f't{i}'] = True
+ return tag_dict
+ if genType == "col":
+ col_dict = True
+ return col_dict
+
+ def genLongJson(self, tag_count, value_type="obj"):
+ """
+ Build a JSON payload with tag_count generated tags plus an explicit
+ "id" tag naming the child table ({stb}_1); single bool col, ns timestamp.
+ Returns (payload dict, super-table name).
+ """
+ stb_name = tdCom.getLongName(7, mode="letters")
+ tb_name = f'{stb_name}_1'
+ tag_dict = self.genMulTagColDict("tag", tag_count, value_type)
+ col_dict = self.genMulTagColDict("col", 1, value_type)
+ # "id" pins the child-table name (in addition to the tag_count tags)
+ tag_dict["id"] = tb_name
+ ts_dict = {'value': 1626006833639000000, 'type': 'ns'}
+ long_json = {"metric": stb_name, "timestamp": ts_dict, "value": col_dict, "tags": tag_dict}
+ return long_json, stb_name
+
+ def getNoIdTbName(self, stb_name):
+ """
+ Return the first auto-generated child-table name under stb_name
+ (first cell of the first row of `select tbname from {stb_name}`).
+ """
+ query_sql = f"select tbname from {stb_name}"
+ tb_name = self.resHandle(query_sql, True)[0][0]
+ return tb_name
+
+ def resHandle(self, query_sql, query_tag):
+ tdSql.execute('reset query cache')
+ row_info = tdSql.query(query_sql, query_tag)
+ col_info = tdSql.getColNameList(query_sql, query_tag)
+ res_row_list = []
+ sub_list = []
+ for row_mem in row_info:
+ for i in row_mem:
+ if "11.1234" in str(i):
+ sub_list.append("11.12345027923584")
+ elif "22.1234" in str(i):
+ sub_list.append("22.123456789")
+ else:
+ sub_list.append(str(i))
+ res_row_list.append(sub_list)
+ res_field_list_without_ts = col_info[0][1:]
+ res_type_list = col_info[1]
+ return res_row_list, res_field_list_without_ts, res_type_list
+
+ def resCmp(self, input_json, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None, none_type_check=None):
+ """
+ Insert input_json via the schemaless JSON protocol, then query stb_name
+ and compare rows, field names and column types against the expectation
+ built by inputHandle().
+ ts=0 means the timestamp was auto-filled: accept it if within 60s of now.
+ none_check_tag: drop "None" cells from both sides before comparing.
+ none_type_check: when None, also compare the per-column type list.
+ """
+ expect_list = self.inputHandle(input_json)
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ query_sql = f"{query_sql} {stb_name} {condition}"
+ res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True)
+ if ts == 0:
+ # auto-generated timestamp: only sanity-check that it is recent,
+ # then compare the remaining columns
+ res_ts = self.dateToTs(res_row_list[0][0])
+ current_time = time.time()
+ if current_time - res_ts < 60:
+ tdSql.checkEqual(res_row_list[0][1:], expect_list[0][1:])
+ else:
+ print("timeout")
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ else:
+ if none_check_tag is not None:
+ # pop in reverse order so earlier indexes stay valid
+ none_index_list = [i for i,x in enumerate(res_row_list[0]) if x=="None"]
+ none_index_list.reverse()
+ for j in none_index_list:
+ res_row_list[0].pop(j)
+ expect_list[0].pop(j)
+ tdSql.checkEqual(res_row_list[0], expect_list[0])
+ tdSql.checkEqual(res_field_list_without_ts, expect_list[1])
+ if none_type_check is None:
+ for i in range(len(res_type_list)):
+ tdSql.checkEqual(res_type_list[i], expect_list[2][i])
+ # tdSql.checkEqual(res_type_list, expect_list[2])
+
+ def initCheckCase(self, value_type="obj"):
+ """
+ Baseline positive case: normal tags and cols, one of every element type;
+ insert must succeed and round-trip through resCmp.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(value_type=value_type)
+ self.resCmp(input_json, stb_name)
+
+ def boolTypeCheckCase(self):
+ """
+ Negative case: mixed-case bool-like strings ("f", "False", "t", ...)
+ used as a bool tag or bool col value must be rejected with a non-zero
+ SchemalessError errno.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
+ for t_type in full_type_list:
+ # probe the string both as tag t0 and as the col value
+ input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0],
+ self.genFullTypeJson(col_value=self.genTsColValue(value=t_type, t_type="bool"))[0]]
+ for input_json in input_json_list:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def symbolsCheckCase(self, value_type="obj"):
+ """
+ Positive case: binary/nchar values containing the symbol set
+ `~!@#$%^&*()_-+={[}]\|:;'\",<.>/? must insert and round-trip.
+ """
+ '''
+ please test :
+ binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
+ '''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
+ nchar_symbols = binary_symbols
+ # once with a binary col, once with an nchar col; tags get both types
+ input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
+ tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type))
+ input_sql2, stb_name2 = self.genFullTypeJson(col_value=self.genTsColValue(value=nchar_symbols, t_type="nchar", value_type=value_type),
+ tag_value=self.genTagValue(t7_value=binary_symbols, t8_value=nchar_symbols, value_type=value_type))
+ self.resCmp(input_sql1, stb_name1)
+ self.resCmp(input_sql2, stb_name2)
+
+ def tsCheckCase(self, value_type="obj"):
+ """
+ test ts list --> ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
+ # ! when a us-precision timestamp ends in all zeros, the database query shows it
+ # but the result fetched through the python connector omits the trailing .000000;
+ # please confirm — with the current time-handling code this passes
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0]
+ for ts in ts_list:
+ # every suffixed entry (ns/us/ms/s) contains the letter "s"
+ if "s" in str(ts):
+ input_json, stb_name = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(tdCom.splitNumLetter(ts)[0]), t_type=tdCom.splitNumLetter(ts)[1]))
+ self.resCmp(input_json, stb_name, ts=ts)
+ else:
+ input_json, stb_name = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="s", value_type=value_type))
+ self.resCmp(input_json, stb_name, ts=ts)
+ if int(ts) == 0:
+ # ts==0 means "server fills the timestamp"; accepted for any unit
+ if value_type == "obj":
+ input_json_list = [self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="ns")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="us")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="ms")),
+ self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type="s"))]
+ elif value_type == "default":
+ input_json_list = [self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), value_type=value_type))]
+ for input_json in input_json_list:
+ self.resCmp(input_json[0], input_json[1], ts=ts)
+ else:
+ # non-zero ts with an empty unit must be rejected
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value=int(ts), t_type=""))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # check result
+ #! bug
+ # ms-precision database: two records serialized as a single JSON array payload
+ # NOTE(review): json.dumps() of the whole list produces one payload string —
+ # presumably the server accepts a JSON array; confirm against the protocol docs
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ms'")
+ tdSql.execute("use test_ts")
+ input_json = [{"metric": "test_ms", "timestamp": {"value": 1626006833640, "type": "ms"}, "value": True, "tags": {"t0": True}},
+ {"metric": "test_ms", "timestamp": {"value": 1626006833641, "type": "ms"}, "value": False, "tags": {"t0": True}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ res = tdSql.query('select * from test_ms', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000")
+
+ # us-precision database
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'us'")
+ tdSql.execute("use test_ts")
+ input_json = [{"metric": "test_us", "timestamp": {"value": 1626006833639000, "type": "us"}, "value": True, "tags": {"t0": True}},
+ {"metric": "test_us", "timestamp": {"value": 1626006833639001, "type": "us"}, "value": False, "tags": {"t0": True}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ res = tdSql.query('select * from test_us', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.639000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.639001")
+
+ # ns-precision database: raw integer timestamps come back
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ns'")
+ tdSql.execute("use test_ts")
+ input_json = [{"metric": "test_ns", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": True, "tags": {"t0": True}},
+ {"metric": "test_ns", "timestamp": {"value": 1626006833639000001, "type": "ns"}, "value": False, "tags": {"t0": True}}]
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ res = tdSql.query('select * from test_ns', True)
+ tdSql.checkEqual(str(res[0][0]), "1626006833639000000")
+ tdSql.checkEqual(str(res[1][0]), "1626006833639000001")
+ # restore the default test database for subsequent cases
+ self.createDb()
+
+ def idSeqCheckCase(self, value_type="obj"):
+ """
+ check id.index in tags: the "id" key may appear at any position
+ eg: t0=**,id=**,t1=**
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+
+ def idLetterCheckCase(self, value_type="obj"):
+ """
+ check the "id" key is case-insensitive: "id", "ID" and "iD" all work,
+ also in combination with a reordered tag position
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ input_json, stb_name = self.genFullTypeJson(id_change_tag=True, id_upper_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+
+ def noIdCheckCase(self, value_type="obj"):
+ """
+ When no "id" tag is given the server auto-generates a child-table name;
+ verify insert succeeds and a non-empty tbname exists.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ query_sql = f"select tbname from {stb_name}"
+ res_row_list = self.resHandle(query_sql, True)[0]
+ if len(res_row_list[0][0]) > 0:
+ # NOTE(review): comparing the list against itself always passes;
+ # presumably only the non-empty check above matters — confirm intent
+ tdSql.checkColNameList(res_row_list, res_row_list)
+ else:
+ tdSql.checkColNameList(res_row_list, "please check noIdCheckCase")
+
+ def maxColTagCheckCase(self, value_type="obj"):
+ """
+ max tag count is 128
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ for input_json in [self.genLongJson(128, value_type)[0]]:
+ tdCom.cleanTb()
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ for input_json in [self.genLongJson(129, value_type)[0]]:
+ tdCom.cleanTb()
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idIllegalNameCheckCase(self, value_type="obj"):
+ """
+ test illegal id name: table names containing any of
+ "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?" must be rejected
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
+ for i in rstr:
+ # embed each illegal character in the middle of the table name
+ input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def idStartWithNumCheckCase(self, value_type="obj"):
+ """
+ Negative case: a table name starting with a digit must be rejected.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def nowTsCheckCase(self, value_type="obj"):
+ """
+ Negative case: the literal "now" is not a supported timestamp value.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def dateFormatTsCheckCase(self, value_type="obj"):
+ """
+ Negative case: a date-formatted string is not a supported timestamp.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def illegalTsCheckCase(self, value_type="obj"):
+ """
+ Negative case: malformed timestamp like "16260068336390us19"
+ (unit embedded mid-number) must be rejected.
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tbnameCheckCase(self, value_type="obj"):
+ """
+ check length 192
+ check upper tbname
+ check upper tag
+ length of stb_name tb_name <= 192; 193-character names must be rejected
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # boundary: exactly 192 characters succeeds for both stb and tb names
+ stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name_192, tb_name=tb_name_192, value_type=value_type)
+ self.resCmp(input_json, stb_name)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+ # 193 characters (stb or tb) must fail
+ for input_json in [self.genFullTypeJson(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"), value_type=value_type)[0], self.genFullTypeJson(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # mixed-case metric, tag names and id value must round-trip unchanged
+ input_json = {'metric': 'Abcdffgg', 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': {'value': False, 'type': 'bool'}, 'tags': {'T1': {'value': 127, 'type': 'tinyint'}, "T2": 127, 'id': 'Abcddd'}}
+ stb_name = "Abcdffgg"
+ self.resCmp(input_json, stb_name)
+
+ def tagNameLengthCheckCase(self):
+ """
+ check tag name limit <= 62
+ a 62-character tag name ("T" + 61 letters) must insert;
+ a 65-character tag name must be rejected
+ NOTE(review): lengths 63/64 are not probed, so the exact boundary
+ is not pinned here — confirm whether 65 vs 63 is intentional
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tag_name = tdCom.getLongName(61, "letters")
+ tag_name = f'T{tag_name}'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {'metric': stb_name, 'timestamp': {'value': 1626006833639000000, 'type': 'ns'}, 'value': "bcdaaa", 'tags': {tag_name: {'value': False, 'type': 'bool'}}}
+ self.resCmp(input_json, stb_name)
+ input_json = {'metric': stb_name, 'timestamp': {'value': 1626006833639000001, 'type': 'ns'}, 'value': "bcdaaaa", 'tags': {tdCom.getLongName(65, "letters"): {'value': False, 'type': 'bool'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagValueLengthCheckCase(self, value_type="obj"):
+ """
+ check full type tag value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # i8
+ for t1 in [-127, 127]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t1 in [-128, 128]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i16
+ for t2 in [-32767, 32767]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t2_value=t2, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t2 in [-32768, 32768]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t2_value=t2))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i32
+ for t3 in [-2147483647, 2147483647]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t3_value=t3, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t3 in [-2147483648, 2147483648]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t3_value=t3))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ #i64
+ for t4 in [-9223372036854775807, 9223372036854775807]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+
+ for t4 in [-9223372036854775808, 9223372036854775808]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t4_value=t4))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ for t5 in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=t5, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # * limit set to 3.4028234664*(10**38)
+ for t5 in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=t5))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ for t6 in [-1.79769*(10**308), -1.79769*(10**308)]:
+ input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t6_value=t6, value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ for t6 in [float(-1.797693134862316*(10**308)), -1.797693134862316*(10**308)]:
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t6_value=t6, value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ if value_type == "obj":
+ # binary
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # # nchar
+ # # * legal nchar could not be larger than 16374/4
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': True, 'type': 'bool'}, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1":{'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ elif value_type == "default":
+ stb_name = tdCom.getLongName(7, "letters")
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(16374, "letters")}}
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(4093, "letters")}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(16375, "letters")}}
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": True, "tags": {"t0": {'value': True, 'type': 'bool'}, "t1": tdCom.getLongName(4094, "letters")}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def colValueLengthCheckCase(self, value_type="obj"):
+ """
+ check full type col value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # i8
+ for value in [-127, 127]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-128, 128]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ # i16
+ tdCom.cleanTb()
+ for value in [-32767]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-32768, 32768]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i32
+ tdCom.cleanTb()
+ for value in [-2147483647]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-2147483648, 2147483648]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i64
+ tdCom.cleanTb()
+ for value in [-9223372036854775807]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ tdCom.cleanTb()
+ for value in [-9223372036854775808, 9223372036854775808]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f32
+ tdCom.cleanTb()
+ for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # * limit set to 4028234664*(10**38)
+ tdCom.cleanTb()
+ for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # f64
+ tdCom.cleanTb()
+ for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), -1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]:
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ # * limit set to 1.797693134862316*(10**308)
+ tdCom.cleanTb()
+ for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]:
+ input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ if value_type == "obj":
+ # binary
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ tdCom.cleanTb()
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # nchar
+ # * legal nchar could not be larger than 16374/4
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+ tdCom.cleanTb()
+ input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+ elif value_type == "default":
+ # binary
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ tdCom.cleanTb()
+ if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+ input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4094, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagColIllegalValueCheckCase(self, value_type="obj"):
+
+ """
+ test illegal tag col value:
+ mixed-case bool strings, non-numeric strings in numeric fields and
+ embedded blanks must be rejected; a set of symbols must be accepted
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ # bool
+ for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
+ try:
+ input_json1 = self.genFullTypeJson(tag_value=self.genTagValue(t0_value=i))[0]
+ # NOTE(review): literal protocol value 2 here — presumably equal to
+ # TDSmlProtocolType.JSON.value used elsewhere in this file; confirm
+ self._conn.schemaless_insert([json.dumps(input_json1)], 2, None)
+ input_json2 = self.genFullTypeJson(col_value=self.genTsColValue(value=i, t_type="bool"))[0]
+ self._conn.schemaless_insert([json.dumps(input_json2)], 2, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # i8 i16 i32 i64 f32 f64
+ for input_json in [
+ self.genFullTypeJson(tag_value=self.genTagValue(t1_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t2_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t3_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t4_value="1s2"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t5_value="11.1s45"))[0],
+ self.genFullTypeJson(tag_value=self.genTagValue(t6_value="11.1s45"))[0],
+ ]:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check binary and nchar blank
+ input_sql1 = self.genFullTypeJson(col_value=self.genTsColValue(value="abc aaa", t_type="binary", value_type=value_type))[0]
+ input_sql2 = self.genFullTypeJson(col_value=self.genTsColValue(value="abc aaa", t_type="nchar", value_type=value_type))[0]
+ input_sql3 = self.genFullTypeJson(tag_value=self.genTagValue(t7_value="abc aaa", value_type=value_type))[0]
+ input_sql4 = self.genFullTypeJson(tag_value=self.genTagValue(t8_value="abc aaa", value_type=value_type))[0]
+ for input_json in [input_sql1, input_sql2, input_sql3, input_sql4]:
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ # check accepted binary and nchar symbols
+ # # * ~!@#$¥%^&*()-+={}|[]、「」:;
+ for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
+ input_json1 = self.genFullTypeJson(col_value=self.genTsColValue(value=f"abc{symbol}aaa", t_type="binary", value_type=value_type))[0]
+ input_json2 = self.genFullTypeJson(tag_value=self.genTagValue(t8_value=f"abc{symbol}aaa", value_type=value_type))[0]
+ self._conn.schemaless_insert([json.dumps(input_json1)], TDSmlProtocolType.JSON.value, None)
+ self._conn.schemaless_insert([json.dumps(input_json2)], TDSmlProtocolType.JSON.value, None)
+
+ def duplicateIdTagColInsertCheckCase(self, value_type="obj"):
+ """
+ check duplicate Id Tag Col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ input_json = self.genFullTypeJson(tag_value=self.genTagValue(t5_value=11.12345027923584, t6_type="float", t6_value=22.12345027923584, value_type=value_type))[0]
+ try:
+ self._conn.schemaless_insert([json.dumps(input_json).replace("t6", "t5")], 2, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ ##### stb exist #####
+    def noIdStbExistCheckCase(self, value_type="obj"):
+        """
+        insert without an "id" tag when the stable already exists: an
+        auto-named child table (name like "t_%") should be created next to
+        the explicitly named one, giving two rows in the stable
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+        self.resCmp(input_json, stb_name)
+        # same stable, no "id" tag -> server derives the child-table name
+        input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+        self.resCmp(input_json, stb_name, condition='where tbname like "t_%"')
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+
+    def duplicateInsertExistCheckCase(self, value_type="obj"):
+        """
+        re-insert the exact same record into an existing stable: the insert
+        must succeed and the stored data must remain unchanged
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json, stb_name = self.genFullTypeJson(value_type=value_type)
+        self.resCmp(input_json, stb_name)
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        self.resCmp(input_json, stb_name)
+
+ def tagColBinaryNcharLengthCheckCase(self, value_type="obj"):
+ """
+ check length increase
+ """
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(value_type=value_type)
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ self.resCmp(input_json, stb_name)
+ tb_name = tdCom.getLongName(5, "letters")
+ input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, tag_value=self.genTagValue(t7_value="binaryTagValuebinaryTagValue", t8_value="ncharTagValuencharTagValue", value_type=value_type))
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"')
+
+    def lengthIcreaseCrashCheckCase(self):
+        """
+        regression check: increasing binary/nchar tag length while a
+        concurrent query script (query/schemalessQueryCrash.py) runs against
+        the same stable must not crash the server
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = "test_crash"
+        input_json = self.genFullTypeJson(stb_name=stb_name)[0]
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        # start the background query, give it time to attach, then insert again
+        os.system('python3 query/schemalessQueryCrash.py &')
+        time.sleep(2)
+        tb_name = tdCom.getLongName(5, "letters")
+        input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, tag_value=self.genTagValue(t7_value="binaryTagValuebinaryTagValue", t8_value="ncharTagValuencharTagValue"))
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        # wait for the background query to finish before verifying row count
+        time.sleep(3)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+
+    def tagColAddDupIDCheckCase(self, value_type="obj"):
+        """
+        check tag count add when stb and tb names are duplicated
+        * tag: applied via alter table ...
+        * col: when update==0 and ts is the same, the col value is unchanged
+        * so in this case the new tag && its value are added, while
+        * the new col is added without value when update==0
+        * and added with value when update==1
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        for db_update_tag in [0, 1]:
+            if db_update_tag == 1 :
+                # recreate the db with update=1 so a same-ts row overwrites col values
+                self.createDb("test_update", db_update_tag=db_update_tag)
+            input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+            self.resCmp(input_json, stb_name)
+            input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=False, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), t_add_tag=True)
+            if db_update_tag == 1 :
+                self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+                tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+                # newly added tag columns have no value for the existing row
+                tdSql.checkData(0, 11, None)
+                tdSql.checkData(0, 12, None)
+            else:
+                self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+                tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+                # update==0: original col value kept, new tag columns are None
+                tdSql.checkData(0, 1, True)
+                tdSql.checkData(0, 11, None)
+                tdSql.checkData(0, 12, None)
+        # restore the default database for the following cases
+        self.createDb()
+
+    def tagAddCheckCase(self, value_type="obj"):
+        """
+        check tag count add: a second child table carrying extra tags extends
+        the stable schema; the first table's new tag columns read as None
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
+        self.resCmp(input_json, stb_name)
+        tb_name_1 = tdCom.getLongName(7, "letters")
+        input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name_1, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), t_add_tag=True)
+        self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name_1}"')
+        # the pre-existing table sees the new tags t10/t11 as None
+        res_row_list = self.resHandle(f"select t10,t11 from {tb_name}", True)[0]
+        tdSql.checkEqual(res_row_list[0], ['None', 'None'])
+        self.resCmp(input_json, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+
+    def tagMd5Check(self, value_type="obj"):
+        """
+        condition: stable schema unchanged.
+        two inserts without "id" but with identical tags must map to the SAME
+        auto-named (md5-derived) child table; adding a tag must create a NEW one
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
+        self.resCmp(input_json, stb_name)
+        tb_name1 = self.getNoIdTbName(stb_name)
+        input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
+        self.resCmp(input_json, stb_name)
+        tb_name2 = self.getNoIdTbName(stb_name)
+        tdSql.query(f"select * from {stb_name}")
+        # same tags -> same derived table name, still a single row
+        tdSql.checkRows(1)
+        tdSql.checkEqual(tb_name1, tb_name2)
+        input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True, t_add_tag=True)
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        tb_name3 = self.getNoIdTbName(stb_name)
+        tdSql.query(f"select * from {stb_name}")
+        # different tag set -> different derived table name, second row appears
+        tdSql.checkRows(2)
+        tdSql.checkNotEqual(tb_name1, tb_name3)
+
+    # * tag binary max is 16384, col+ts binary max 49151
+    def tagColBinaryMaxLengthCheckCase(self, value_type="obj"):
+        """
+        check the binary tag length boundary: every binary and nchar value
+        costs length+2 bytes, so with two tags the total may not exceed
+        16384 - 2*2; one byte more must fail
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(7, "letters")
+        tb_name = f'{stb_name}_1'
+        tag_value = {"t0": {"value": True, "type": "bool"}}
+        tag_value["id"] = tb_name
+        col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type)
+        input_json = {"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": col_value, "tags": tag_value}
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+        # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
+        if value_type == "obj":
+            tag_value["t1"] = {"value": tdCom.getLongName(16374, "letters"), "type": "binary"}
+            tag_value["t2"] = {"value": tdCom.getLongName(5, "letters"), "type": "binary"}
+        elif value_type == "default":
+            # untyped strings take the server's defaultJSONStrType; nchar costs 4 bytes per char
+            if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+                tag_value["t1"] = tdCom.getLongName(16374, "letters")
+                tag_value["t2"] = tdCom.getLongName(5, "letters")
+            elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+                tag_value["t1"] = tdCom.getLongName(4093, "letters")
+                tag_value["t2"] = tdCom.getLongName(1, "letters")
+        tag_value.pop('id')
+        # exactly at the limit: insert succeeds, auto-named table adds a row
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+        # one char over the limit: insert must fail and row count stays at 2
+        if value_type == "obj":
+            tag_value["t2"] = {"value": tdCom.getLongName(6, "letters"), "type": "binary"}
+        elif value_type == "default":
+            if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+                tag_value["t2"] = tdCom.getLongName(6, "letters")
+            elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+                tag_value["t2"] = tdCom.getLongName(2, "letters")
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+
+    # * tag nchar max is 16374/4, col+ts nchar max 49151
+    def tagColNcharMaxLengthCheckCase(self, value_type="obj"):
+        """
+        check the nchar tag length boundary: legal nchar length may not
+        exceed 16374/4 characters; one unit more must fail
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(7, "letters")
+        tb_name = f'{stb_name}_1'
+        tag_value = {"t0": True}
+        tag_value["id"] = tb_name
+        col_value= True
+        input_json = {"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": col_value, "tags": tag_value}
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+
+        # * legal nchar could not be larger than 16374/4
+        if value_type == "obj":
+            tag_value["t1"] = {"value": tdCom.getLongName(4093, "letters"), "type": "nchar"}
+            tag_value["t2"] = {"value": tdCom.getLongName(1, "letters"), "type": "nchar"}
+        elif value_type == "default":
+            # untyped strings take the server's defaultJSONStrType
+            if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+                tag_value["t1"] = tdCom.getLongName(16374, "letters")
+                tag_value["t2"] = tdCom.getLongName(5, "letters")
+            elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+                tag_value["t1"] = tdCom.getLongName(4093, "letters")
+                tag_value["t2"] = tdCom.getLongName(1, "letters")
+        tag_value.pop('id')
+        # at the limit: insert succeeds
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+        # NOTE(review): t2 is typed "binary" below although this is the nchar
+        # case — presumably intentional to push the total over the limit; confirm
+        if value_type == "obj":
+            tag_value["t2"] = {"value": tdCom.getLongName(2, "letters"), "type": "binary"}
+        elif value_type == "default":
+            if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
+                tag_value["t2"] = tdCom.getLongName(6, "letters")
+            elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
+                tag_value["t2"] = tdCom.getLongName(2, "letters")
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+        tdSql.query(f"select * from {stb_name}")
+        tdSql.checkRows(2)
+
+    def batchInsertCheckCase(self, value_type="obj"):
+        """
+        batch insert of nine records spread over three stables (st123456,
+        stb_name, stf567890) in one call; afterwards verify stable count,
+        child-table count, and the row count of st123456
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = "stb_name"
+        # pre-create one of the stables so the batch also hits an existing schema
+        tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+        input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
+                      {"metric": "st123456", "timestamp": {"value": 1626006833640000000, "type": "ns"}, "value": {"value": 2, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+                      {"metric": "stb_name", "timestamp": {"value": 1626056811823316532, "type": "ns"}, "value": {"value": 3, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste", "type": "nchar"}}},
+                      {"metric": "stf567890", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 4, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+                      {"metric": "st123456", "timestamp": {"value": 1626006833642000000, "type": "ns"}, "value": {"value": 5, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t2": {"value": 5, "type": "double"}, "t3": {"value": "t4", "type": "binary"}}},
+                      {"metric": "stb_name", "timestamp": {"value": 1626056811843316532, "type": "ns"}, "value": {"value": 6, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}},
+                      {"metric": "stb_name", "timestamp": {"value": 1626056812843316532, "type": "ns"}, "value": {"value": 7, "type": "bigint"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}},
+                      {"metric": "st123456", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 8, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+                      {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
+        # the non-"obj" variant mixes typed objects with bare JSON values
+        if value_type != "obj":
+            input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": 1, "tags": {"t1": 3, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
+                          {"metric": "st123456", "timestamp": {"value": 1626006833640000000, "type": "ns"}, "value": 2, "tags": {"t1": {"value": 4, "type": "double"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+                          {"metric": "stb_name", "timestamp": {"value": 1626056811823316532, "type": "ns"}, "value": 3, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste", "type": "nchar"}}},
+                          {"metric": "stf567890", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": 4, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+                          {"metric": "st123456", "timestamp": {"value": 1626006833642000000, "type": "ns"}, "value": {"value": 5, "type": "double"}, "tags": {"t1": {"value": 4, "type": "double"}, "t2": 5.0, "t3": {"value": "t4", "type": "binary"}}},
+                          {"metric": "stb_name", "timestamp": {"value": 1626056811843316532, "type": "ns"}, "value": {"value": 6, "type": "double"}, "tags": {"t2": 5.0, "t3": {"value": "ste2", "type": "nchar"}}},
+                          {"metric": "stb_name", "timestamp": {"value": 1626056812843316532, "type": "ns"}, "value": {"value": 7, "type": "double"}, "tags": {"t2": {"value": 5, "type": "double"}, "t3": {"value": "ste2", "type": "nchar"}}},
+                          {"metric": "st123456", "timestamp": {"value": 1626006933640000000, "type": "ns"}, "value": {"value": 8, "type": "double"}, "tags": {"t1": {"value": 4, "type": "double"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}},
+                          {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "double"}, "tags": {"t1": 4, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        tdSql.query('show stables')
+        tdSql.checkRows(3)
+        tdSql.query('show tables')
+        tdSql.checkRows(6)
+        # two st123456 records share a timestamp+tags, so only 5 distinct rows land
+        tdSql.query('select * from st123456')
+        tdSql.checkRows(5)
+
+    def multiInsertCheckCase(self, count, value_type="obj"):
+        """
+        insert `count` records for one stable in a single schemaless call;
+        each record omits "id", so one auto-named child table per record is expected
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        sql_list = list()
+        stb_name = tdCom.getLongName(8, "letters")
+        tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
+        for i in range(count):
+            input_json = self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True)[0]
+            sql_list.append(input_json)
+        self._conn.schemaless_insert([json.dumps(sql_list)], TDSmlProtocolType.JSON.value, None)
+        tdSql.query('show tables')
+        tdSql.checkRows(count)
+
+    def batchErrorInsertCheckCase(self):
+        """
+        a batch whose first record is invalid (string "tt" declared as bool)
+        must fail as a whole with a non-zero errno
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
+                      {"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def multiColsInsertCheckCase(self, value_type="obj"):
+        """
+        a payload carrying multiple value (col) entries must be rejected:
+        the JSON protocol allows exactly one value per record
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0]
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def blankColInsertCheckCase(self, value_type="obj"):
+        """
+        a payload with a blank/missing value (col) entry must be rejected
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0]
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def blankTagInsertCheckCase(self, value_type="obj"):
+        """
+        a payload with a blank/missing tags entry must be rejected
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0]
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def chineseCheckCase(self):
+        """
+        check that nchar col/tag values containing Chinese characters
+        round-trip correctly through schemaless insert
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json, stb_name = self.genFullTypeJson(chinese_tag=True)
+        self.resCmp(input_json, stb_name)
+
+    def multiFieldCheckCase(self, value_type="obj"):
+        '''
+        a payload with an extra, unexpected top-level field must be rejected
+        '''
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0]
+        try:
+            self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+            raise Exception("should not reach here")
+        except SchemalessError as err:
+            tdSql.checkNotEqual(err.errno, 0)
+
+    def spellCheckCase(self):
+        """
+        check that type and precision keywords are accepted case-insensitively
+        (e.g. "Bigint"/"bigInt", "Ns"/"nS"/"NS", "Us", "Ms", "S")
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        stb_name = tdCom.getLongName(8, "letters")
+        input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}},
+                           {"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}},
+                           {"metric": f'{stb_name}_3', "timestamp": {"value": 1626006833639000002, "type": "NS"}, "value": {"value": 2147483647, "type": "iNt"}, "tags": {"t1": {"value": 2147483647, "type": "iNt"}}},
+                           {"metric": f'{stb_name}_4', "timestamp": {"value": 1626006833639019, "type": "Us"}, "value": {"value": 9223372036854775807, "type": "bigInt"}, "tags": {"t1": {"value": 9223372036854775807, "type": "bigInt"}}},
+                           {"metric": f'{stb_name}_5', "timestamp": {"value": 1626006833639018, "type": "uS"}, "value": {"value": 11.12345027923584, "type": "flOat"}, "tags": {"t1": {"value": 11.12345027923584, "type": "flOat"}}},
+                           {"metric": f'{stb_name}_6', "timestamp": {"value": 1626006833639017, "type": "US"}, "value": {"value": 22.123456789, "type": "douBle"}, "tags": {"t1": {"value": 22.123456789, "type": "douBle"}}},
+                           {"metric": f'{stb_name}_7', "timestamp": {"value": 1626006833640, "type": "Ms"}, "value": {"value": "vozamcts", "type": "binaRy"}, "tags": {"t1": {"value": "vozamcts", "type": "binaRy"}}},
+                           {"metric": f'{stb_name}_8', "timestamp": {"value": 1626006833641, "type": "mS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}},
+                           {"metric": f'{stb_name}_9', "timestamp": {"value": 1626006833642, "type": "MS"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}},
+                           {"metric": f'{stb_name}_10', "timestamp": {"value": 1626006834, "type": "S"}, "value": {"value": "vozamcts", "type": "nchAr"}, "tags": {"t1": {"value": "vozamcts", "type": "nchAr"}}}]
+
+        # each record targets its own stable; verify each one round-trips
+        for input_sql in input_json_list:
+            stb_name = input_sql["metric"]
+            self.resCmp(input_sql, stb_name)
+
+    def tbnameTagsColsNameCheckCase(self):
+        """
+        check special characters (!@#$%^&* etc.) in metric and tag names;
+        names are stored lower-cased and the table must be queried/dropped
+        with backquotes
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}}
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        query_sql = 'select * from `rfa$sta`'
+        query_res = tdSql.query(query_sql, True)
+        tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), True, False, 127, 32767, 2147483647, 9223372036854775807, 11.12345027923584, 22.123456789, 'binaryTagValue', 'ncharTagValue')])
+        # column/tag names come back lower-cased with symbols preserved
+        col_tag_res = tdSql.getColNameList(query_sql)
+        tdSql.checkEqual(col_tag_res, ['ts', 'value', 'tt!0', 'tt@1', 't@2', 't$3', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9'])
+        tdSql.execute('drop table `rfa$sta`')
+
+    def pointTransCheckCase(self, value_type="obj"):
+        """
+        metric value "." trans to "_"
+        # NOTE(review): the drop below uses the original dotted name in
+        # backquotes — confirm whether "." is actually translated or preserved
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0]
+        self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+        tdSql.execute("drop table `.point.trans.test`")
+
+    def genSqlList(self, count=5, stb_name="", tb_name="", value_type="obj"):
+        """
+        generate `count` payloads for each multi-thread scenario and return
+        the thirteen lists as a tuple (order matters to callers).
+        naming legend used in the list names:
+        stb --> supertable
+        tb --> table
+        ts --> timestamp, same default
+        col --> column, same default
+        tag --> tag, same default
+        d --> different
+        s --> same
+        a --> add
+        m --> minus
+        """
+        d_stb_d_tb_list = list()
+        s_stb_s_tb_list = list()
+        s_stb_s_tb_a_tag_list = list()
+        s_stb_s_tb_m_tag_list = list()
+        s_stb_d_tb_list = list()
+        s_stb_d_tb_m_tag_list = list()
+        s_stb_d_tb_a_tag_list = list()
+        s_stb_s_tb_d_ts_list = list()
+        s_stb_s_tb_d_ts_m_tag_list = list()
+        s_stb_s_tb_d_ts_a_tag_list = list()
+        s_stb_d_tb_d_ts_list = list()
+        s_stb_d_tb_d_ts_m_tag_list = list()
+        s_stb_d_tb_d_ts_a_tag_list = list()
+        for i in range(count):
+            d_stb_d_tb_list.append(self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type)))
+            s_stb_s_tb_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type)))
+            s_stb_s_tb_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), t_add_tag=True))
+            s_stb_s_tb_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), t_mul_tag=True))
+            s_stb_d_tb_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True))
+            s_stb_d_tb_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, t_mul_tag=True))
+            s_stb_d_tb_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, t_add_tag=True))
+            s_stb_s_tb_d_ts_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns")))
+            s_stb_s_tb_d_ts_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"), t_mul_tag=True))
+            s_stb_s_tb_d_ts_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, tb_name=tb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), ts_value = self.genTsColValue(1626006833639000000, "ns"), t_add_tag=True))
+            s_stb_d_tb_d_ts_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(1626006833639000000, "ns")))
+            s_stb_d_tb_d_ts_m_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(0, "ns"), t_mul_tag=True))
+            s_stb_d_tb_d_ts_a_tag_list.append(self.genFullTypeJson(stb_name=stb_name, col_value=self.genTsColValue(value=tdCom.getLongName(8, "letters"), t_type="binary", value_type=value_type), tag_value=self.genTagValue(t7_value=tdCom.getLongName(8, "letters"), value_type=value_type), id_noexist_tag=True, ts_value = self.genTsColValue(0, "ns"), t_add_tag=True))
+
+        return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_tag_list, s_stb_s_tb_m_tag_list, \
+               s_stb_d_tb_list, s_stb_d_tb_m_tag_list, s_stb_d_tb_a_tag_list, s_stb_s_tb_d_ts_list, \
+               s_stb_s_tb_d_ts_m_tag_list, s_stb_s_tb_d_ts_a_tag_list, s_stb_d_tb_d_ts_list, \
+               s_stb_d_tb_d_ts_m_tag_list, s_stb_d_tb_d_ts_a_tag_list
+
+    def genMultiThreadSeq(self, sql_list):
+        # build one (not yet started) thread per record; each thread performs a
+        # single schemaless JSON insert of that record
+        tlist = list()
+        for insert_sql in sql_list:
+            t = threading.Thread(target=self._conn.schemaless_insert,args=([json.dumps(insert_sql[0])], TDSmlProtocolType.JSON.value, None))
+            tlist.append(t)
+        return tlist
+
+    def multiThreadRun(self, tlist):
+        # start every thread first so the inserts overlap, then join them all
+        for t in tlist:
+            t.start()
+        for t in tlist:
+            t.join()
+
+    def stbInsertMultiThreadCheckCase(self, value_type="obj"):
+        """
+        concurrent inserts, each thread targeting a different stable:
+        all five inserts should land, yielding five child tables
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        input_json = self.genSqlList(value_type=value_type)[0]
+        self.multiThreadRun(self.genMultiThreadSeq(input_json))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(5)
+
+    def sStbStbDdataInsertMultiThreadCheckCase(self, value_type="obj"):
+        """
+        concurrent inserts into the same stb and tb with different data:
+        only one table/row survives and the first-written data is kept
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+        self.resCmp(input_json, stb_name)
+        s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[1]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1)
+        # the surviving table name must be the originally created one
+        expected_tb_name = self.getNoIdTbName(stb_name)[0]
+        tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1)
+
+    def sStbStbDdataAtInsertMultiThreadCheckCase(self, value_type="obj"):
+        """
+        concurrent inserts into the same stb and tb with different data and
+        ADDED columns/tags: only one table/row survives, first data kept
+        """
+        tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+        tdCom.cleanTb()
+        tb_name = tdCom.getLongName(7, "letters")
+        input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+        self.resCmp(input_json, stb_name)
+        s_stb_s_tb_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[2]
+        self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_a_tag_list))
+        tdSql.query(f"show tables;")
+        tdSql.checkRows(1)
+        expected_tb_name = self.getNoIdTbName(stb_name)[0]
+        tdSql.checkEqual(tb_name, expected_tb_name)
+        tdSql.query(f"select * from {stb_name};")
+        tdSql.checkRows(1)
+
+ def sStbStbDdataMtInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb tb, different data, minus columes and tags, result keep first data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name, value_type=value_type)[3]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ expected_tb_name = self.getNoIdTbName(stb_name)[0]
+ tdSql.checkEqual(tb_name, expected_tb_name)
+ tdSql.query(f"select * from {stb_name};")
+ tdSql.checkRows(1)
+
+ def sStbDtbDdataInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb, different tb, different data
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, different data, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "vqowydbc", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "plgkckpv", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "cujyqvlj", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "twjxisat", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz')]
+
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(2)
+
+ def sStbDtbDdataAtInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb, different tb, different data, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_d_ts_list = [({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "hkgjiwdj", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "vozamcts", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "rljjrrul", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "bmcanhbs", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "basanglx", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "enqkyvmb", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "clsajzpp", "tags": {"id": tb_name, "t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "eivaegjk", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz'),
+ ({"metric": stb_name, "timestamp": {"value": 0, "type": "ns"}, "value": "jitwseso", "tags": {"id": tb_name, "t0": {"value": True, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}, "t7": {"value": "yhlwkddq", "type": "binary"}, "t8": {"value": "ncharTagValue", "type": "nchar"}}}, 'yzwswz')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rhnikvfq', 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 'id': tb_name}}, 'punftb')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ tdSql.query(f"select * from {stb_name} where t8 is not NULL")
+ tdSql.checkRows(6)
+
+ def sStbStbDdataDtsAtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb tb, different ts, add tag, mul col
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
+ input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_s_tb_d_ts_a_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'tuzsfrom', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'llqzvgvw', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'nttjdzgi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'tclbosqc', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'uatpzgpi', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'rlpuzodt', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'cwnpdnng', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}, 't7': {'value': 'afcibyeb', 'type': 'binary'}, 't8': {'value': 'ncharTagValue', 'type': 'nchar'}, 't11': {'value': 127, 'type': 'tinyint'}, 't10': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': tb_name}}, 'punftb')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(1)
+ tdSql.query(f"select * from {stb_name}")
+ tdSql.checkRows(6)
+ for t in ["t10", "t11"]:
+ tdSql.query(f"select * from {stb_name} where {t} is not NULL;")
+ tdSql.checkRows(0)
+
+ def sStbDtbDdataDtsInsertMultiThreadCheckCase(self, value_type="obj"):
+ """
+ thread input same stb, different tb, data, ts
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(6)
+
+ def sStbDtbDdataDtsMtInsertMultiThreadCheckCase(self):
+ """
+ thread input same stb, different tb, data, ts, add col, mul tag
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
+ self.resCmp(input_json, stb_name)
+ s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'llqzvgvw', 'type': 'binary'}, 'tags': {'t0': {'value': True, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'tclbosqc', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rlpuzodt', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
+ ({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': {'value': 'rhnikvfq', 'type': 'binary'}, 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb')]
+ self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list))
+ tdSql.query(f"show tables;")
+ tdSql.checkRows(3)
+
+ def test(self):
+ try:
+ input_json = f'test_nchar 0 L"涛思数据" t0=f,t1=L"涛思数据",t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64'
+ self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
+ # input_json, stb_name = self.genFullTypeJson()
+ # self.resCmp(input_json, stb_name)
+ except SchemalessError as err:
+ print(err.errno)
+
+ def runAll(self):
+ for value_type in ["obj", "default"]:
+ self.initCheckCase(value_type)
+ self.symbolsCheckCase(value_type)
+ self.tsCheckCase(value_type)
+ self.idSeqCheckCase(value_type)
+ self.idLetterCheckCase(value_type)
+ self.noIdCheckCase(value_type)
+ self.maxColTagCheckCase(value_type)
+ self.idIllegalNameCheckCase(value_type)
+ self.idStartWithNumCheckCase(value_type)
+ self.nowTsCheckCase(value_type)
+ self.dateFormatTsCheckCase(value_type)
+ self.illegalTsCheckCase(value_type)
+ self.tbnameCheckCase(value_type)
+ self.tagValueLengthCheckCase(value_type)
+ self.colValueLengthCheckCase(value_type)
+ self.tagColIllegalValueCheckCase(value_type)
+ self.duplicateIdTagColInsertCheckCase(value_type)
+ self.noIdStbExistCheckCase(value_type)
+ self.duplicateInsertExistCheckCase(value_type)
+ self.tagColBinaryNcharLengthCheckCase(value_type)
+ self.tagColAddDupIDCheckCase(value_type)
+ self.tagAddCheckCase(value_type)
+ self.tagMd5Check(value_type)
+ self.tagColBinaryMaxLengthCheckCase(value_type)
+ self.tagColNcharMaxLengthCheckCase(value_type)
+ self.batchInsertCheckCase(value_type)
+ self.multiInsertCheckCase(10, value_type)
+ self.multiColsInsertCheckCase(value_type)
+ self.blankColInsertCheckCase(value_type)
+ self.blankTagInsertCheckCase(value_type)
+ self.multiFieldCheckCase(value_type)
+ self.pointTransCheckCase(value_type)
+ self.stbInsertMultiThreadCheckCase(value_type)
+ self.tagNameLengthCheckCase()
+ self.boolTypeCheckCase()
+ self.batchErrorInsertCheckCase()
+ self.chineseCheckCase()
+ self.spellCheckCase()
+ self.tbnameTagsColsNameCheckCase()
+ # # MultiThreads
+ self.sStbStbDdataInsertMultiThreadCheckCase()
+ self.sStbStbDdataAtInsertMultiThreadCheckCase()
+ self.sStbStbDdataMtInsertMultiThreadCheckCase()
+ self.sStbDtbDdataInsertMultiThreadCheckCase()
+ self.sStbDtbDdataAtInsertMultiThreadCheckCase()
+ self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
+ self.sStbDtbDdataMtInsertMultiThreadCheckCase()
+ self.sStbStbDdataDtsInsertMultiThreadCheckCase()
+ self.sStbStbDdataDtsMtInsertMultiThreadCheckCase()
+ self.sStbDtbDdataDtsMtInsertMultiThreadCheckCase()
+ self.lengthIcreaseCrashCheckCase()
+
+ def run(self):
+ print("running {}".format(__file__))
+ self.createDb()
+ try:
+ self.runAll()
+ except Exception as err:
+ print(''.join(traceback.format_exception(None, err, err.__traceback__)))
+ raise err
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/insert/openTsdbTelnetLinesInsert.py b/tests/pytest/insert/openTsdbTelnetLinesInsert.py
index c3524af5ba58d636a5f5810759aec507b648495b..de27ff7a08cb46a7d7219edaa186edad6f662716 100644
--- a/tests/pytest/insert/openTsdbTelnetLinesInsert.py
+++ b/tests/pytest/insert/openTsdbTelnetLinesInsert.py
@@ -20,6 +20,7 @@ from util.log import *
from util.cases import *
from util.sql import *
from util.common import tdCom
+from util.types import TDSmlProtocolType, TDSmlTimestampType
import threading
class TDTestCase:
@@ -37,19 +38,14 @@ class TDTestCase:
tdSql.execute(f"create database if not exists {name} precision 'us' update 1")
tdSql.execute(f'use {name}')
- def timeTrans(self, time_value):
- if time_value.lower().endswith("ns"):
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000000
- elif time_value.lower().endswith("us") or time_value.isdigit() and int(time_value) != 0:
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000
- elif time_value.lower().endswith("ms"):
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1000
- elif time_value.lower().endswith("s") and list(time_value)[-1] not in "num":
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1
- elif int(time_value) == 0:
+ def timeTrans(self, time_value, ts_type):
+ if int(time_value) == 0:
ts = time.time()
else:
- print("input ts maybe not right format")
+ if ts_type == TDSmlTimestampType.MILLI_SECOND.value or ts_type == None:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1000
+ elif ts_type == TDSmlTimestampType.SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1
ulsec = repr(ts).split('.')[1][:6]
if len(ulsec) < 6 and int(ulsec) != 0:
ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
@@ -66,50 +62,56 @@ class TDTestCase:
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
- def getTdTypeValue(self, value):
- if value.lower().endswith("i8"):
- td_type = "TINYINT"
- td_tag_value = ''.join(list(value)[:-2])
- elif value.lower().endswith("i16"):
- td_type = "SMALLINT"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.lower().endswith("i32"):
- td_type = "INT"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.lower().endswith("i64"):
- td_type = "BIGINT"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.lower().endswith("u64"):
- td_type = "BIGINT UNSIGNED"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.lower().endswith("f32"):
- td_type = "FLOAT"
- td_tag_value = ''.join(list(value)[:-3])
- td_tag_value = '{}'.format(np.float32(td_tag_value))
- elif value.lower().endswith("f64"):
- td_type = "DOUBLE"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.lower().startswith('l"'):
- td_type = "NCHAR"
- td_tag_value = ''.join(list(value)[2:-1])
- elif value.startswith('"') and value.endswith('"'):
- td_type = "BINARY"
- td_tag_value = ''.join(list(value)[1:-1])
- elif value.lower() == "t" or value.lower() == "true":
- td_type = "BOOL"
- td_tag_value = "True"
- elif value.lower() == "f" or value.lower() == "false":
- td_type = "BOOL"
- td_tag_value = "False"
- elif value.isdigit():
- td_type = "BIGINT"
- td_tag_value = value
- else:
- td_type = "DOUBLE"
- if "e" in value.lower():
+ def getTdTypeValue(self, value, vtype="col"):
+ if vtype == "col":
+ if value.lower().endswith("i8"):
+ td_type = "TINYINT"
+ td_tag_value = ''.join(list(value)[:-2])
+ elif value.lower().endswith("i16"):
+ td_type = "SMALLINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i32"):
+ td_type = "INT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i64"):
+ td_type = "BIGINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("u64"):
+ td_type = "BIGINT UNSIGNED"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("f32"):
+ td_type = "FLOAT"
+ td_tag_value = ''.join(list(value)[:-3])
+ td_tag_value = '{}'.format(np.float32(td_tag_value))
+ elif value.lower().endswith("f64"):
+ td_type = "DOUBLE"
+ td_tag_value = ''.join(list(value)[:-3])
+ if "e" in value.lower():
+ td_tag_value = str(float(td_tag_value))
+ elif value.lower().startswith('l"'):
+ td_type = "NCHAR"
+ td_tag_value = ''.join(list(value)[2:-1])
+ elif value.startswith('"') and value.endswith('"'):
+ td_type = "BINARY"
+ td_tag_value = ''.join(list(value)[1:-1])
+ elif value.lower() == "t" or value.lower() == "true":
+ td_type = "BOOL"
+ td_tag_value = "True"
+ elif value.lower() == "f" or value.lower() == "false":
+ td_type = "BOOL"
+ td_tag_value = "False"
+ elif value.isdigit():
+ td_type = "DOUBLE"
td_tag_value = str(float(value))
else:
- td_tag_value = value
+ td_type = "DOUBLE"
+ if "e" in value.lower():
+ td_tag_value = str(float(value))
+ else:
+ td_tag_value = value
+ elif vtype == "tag":
+ td_type = "NCHAR"
+ td_tag_value = str(value)
return td_type, td_tag_value
def typeTrans(self, type_list):
@@ -139,12 +141,12 @@ class TDTestCase:
type_num_list.append(14)
return type_num_list
- def inputHandle(self, input_sql):
+ def inputHandle(self, input_sql, ts_type):
input_sql_split_list = input_sql.split(" ")
stb_name = input_sql_split_list[0]
stb_tag_list = input_sql_split_list[3:]
stb_col_value = input_sql_split_list[2]
- ts_value = self.timeTrans(input_sql_split_list[1])
+ ts_value = self.timeTrans(input_sql_split_list[1], ts_type)
tag_name_list = []
tag_value_list = []
@@ -160,11 +162,11 @@ class TDTestCase:
if "id=" in elm.lower():
tb_name = elm.split('=')[1]
else:
- tag_name_list.append(elm.split("=")[0])
+ tag_name_list.append(elm.split("=")[0].lower())
tag_value_list.append(elm.split("=")[1])
tb_name = ""
- td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1])[1])
- td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1])[0])
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
col_name_list.append('value')
col_value_list.append(stb_col_value)
@@ -190,9 +192,9 @@ class TDTestCase:
def genFullTypeSql(self, stb_name="", tb_name="", value="", t0="", t1="127i8", t2="32767i16", t3="2147483647i32",
t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
- t8="L\"ncharTagValue\"", ts="1626006833639000000ns",
- id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_double_tag=None,
- t_add_tag=None, t_mul_tag=None, t_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
+ t8="L\"ncharTagValue\"", ts="1626006833641",
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ t_add_tag=None, t_mul_tag=None, c_multi_tag=None, c_blank_tag=None, t_blank_tag=None,
chinese_tag=None, multi_field_tag=None, point_trans_tag=None):
if stb_name == "":
stb_name = tdCom.getLongName(len=6, mode="letters")
@@ -206,33 +208,37 @@ class TDTestCase:
id = "ID"
else:
id = "id"
- sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\" t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ if id_mixul_tag is not None:
+ id = random.choice(["iD", "Id"])
+ else:
+ id = "id"
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
if id_noexist_tag is not None:
sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
if t_add_tag is not None:
sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t9={t8}'
if id_change_tag is not None:
- sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}=\"{tb_name}\" t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ sql_seq = f'{stb_name} {ts} {value} t0={t0} {id}={tb_name} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
if id_double_tag is not None:
sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}_1\" t0={t0} t1={t1} {id}=\"{tb_name}_2\" t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
if t_add_tag is not None:
- sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\" t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}'
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8} t11={t1} t10={t8}'
if t_mul_tag is not None:
- sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\" t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
if id_noexist_tag is not None:
sql_seq = f'{stb_name} {ts} {value} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
- if t_multi_tag is not None:
- sql_seq = f'{stb_name} {ts} {value} {value} {id}=\"{tb_name}\" t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
+ if c_multi_tag is not None:
+ sql_seq = f'{stb_name} {ts} {value} {value} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6}'
if c_blank_tag is not None:
- sql_seq = f'{stb_name} {ts} {id}=\"{tb_name}\" t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
+ sql_seq = f'{stb_name} {ts} {id}={tb_name} t0={t0} t1={t1} t2={t2} t3={t3} t4={t4} t5={t5} t6={t6} t7={t7} t8={t8}'
if t_blank_tag is not None:
- sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\"'
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name}'
if chinese_tag is not None:
sql_seq = f'{stb_name} {ts} L"涛思数据" t0={t0} t1=L"涛思数据"'
if multi_field_tag is not None:
- sql_seq = f'{stb_name} {ts} {value} {id}=\"{tb_name}\" t0={t0} {value}'
+ sql_seq = f'{stb_name} {ts} {value} {id}={tb_name} t0={t0} {value}'
if point_trans_tag is not None:
- sql_seq = f'point.trans.test {ts} {value} t0={t0}'
+ sql_seq = f'.point.trans.test {ts} {value} t0={t0}'
return sql_seq, stb_name
def genMulTagColStr(self, genType, count=1):
@@ -257,8 +263,8 @@ class TDTestCase:
tb_name = f'{stb_name}_1'
tag_str = self.genMulTagColStr("tag", tag_count)
col_str = self.genMulTagColStr("col")
- ts = "1626006833640000000ns"
- long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + f'id=\"{tb_name}\"' + ' ' + tag_str
+ ts = "1626006833641"
+ long_sql = stb_name + ' ' + ts + ' ' + col_str + ' ' + f'id={tb_name}' + ' ' + tag_str
return long_sql, stb_name
def getNoIdTbName(self, stb_name):
@@ -280,9 +286,12 @@ class TDTestCase:
res_type_list = col_info[1]
return res_row_list, res_field_list_without_ts, res_type_list
- def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None):
- expect_list = self.inputHandle(input_sql)
- self._conn.schemaless_insert([input_sql], 1)
+ def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, ts_type=None, id=True, none_check_tag=None, precision=None):
+ expect_list = self.inputHandle(input_sql, ts_type)
+ if precision == None:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, ts_type)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, precision)
query_sql = f"{query_sql} {stb_name} {condition}"
res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True)
if ts == 0:
@@ -309,6 +318,7 @@ class TDTestCase:
"""
normal tags and cols, one for every elm
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
@@ -317,6 +327,7 @@ class TDTestCase:
"""
check all normal type
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
@@ -331,6 +342,7 @@ class TDTestCase:
please test :
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
@@ -341,31 +353,67 @@ class TDTestCase:
def tsCheckCase(self):
"""
- test ts list --> ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
+ test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"]
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=None)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006834)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ms'")
+ tdSql.execute("use test_ts")
+ input_sql = ['test_ms 1626006833640 t t0=t', 'test_ms 1626006833641 f t0=t']
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None)
+ res = tdSql.query('select * from test_ms', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000")
+
+ def openTstbTelnetTsCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
- ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
- for ts in ts_list:
- input_sql, stb_name = self.genFullTypeSql(ts=ts)
- self.resCmp(input_sql, stb_name, ts=ts)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts=0)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006833640 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 1626006834 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ stb_name = input_sql.split(" ")[0]
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+ for ts in [1, 12, 123, 1234, 12345, 123456, 1234567, 12345678, 162600683, 16260068341, 162600683412, 16260068336401]:
+ try:
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")} {ts} 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.TELNET.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
def idSeqCheckCase(self):
"""
check id.index in tags
eg: t0=**,id=**,t1=**
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name)
- def idUpperCheckCase(self):
+ def idLetterCheckCase(self):
"""
check id param
eg: id and ID
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True)
+ self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
self.resCmp(input_sql, stb_name)
@@ -373,6 +421,7 @@ class TDTestCase:
"""
id not exist
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
@@ -387,52 +436,49 @@ class TDTestCase:
"""
max tag count is 128
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_sql in [self.genLongSql(128)[0]]:
tdCom.cleanTb()
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
for input_sql in [self.genLongSql(129)[0]]:
tdCom.cleanTb()
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- def idIllegalNameCheckCase(self):
+ def stbTbNameCheckCase(self):
"""
test illegal id name
mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
- rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
+ rstr = list("~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
for i in rstr:
- input_sql = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"")[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"")
+ self.resCmp(input_sql, f'`{stb_name}`')
+ tdSql.execute(f'drop table if exists `{stb_name}`')
def idStartWithNumCheckCase(self):
"""
id is start with num
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
- input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb")
+ self.resCmp(input_sql, stb_name)
def nowTsCheckCase(self):
"""
check now unsupported
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(ts="now")[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -441,10 +487,11 @@ class TDTestCase:
"""
check date format ts unsupported
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -453,114 +500,74 @@ class TDTestCase:
"""
check ts format like 16260068336390us19
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- def tagValueLengthCheckCase(self):
+ def tbnameCheckCase(self):
"""
- check full type tag value limit
+ check length 192
+ check upper tbname
+ check upper tag
+ length of stb_name tb_name <= 192
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tb_name_192 = tdCom.getLongName(len=192, mode="letters")
tdCom.cleanTb()
- # i8
- for t1 in ["-127i8", "127i8"]:
- input_sql, stb_name = self.genFullTypeSql(t1=t1)
- self.resCmp(input_sql, stb_name)
- for t1 in ["-128i8", "128i8"]:
- input_sql = self.genFullTypeSql(t1=t1)[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
- #i16
- for t2 in ["-32767i16", "32767i16"]:
- input_sql, stb_name = self.genFullTypeSql(t2=t2)
- self.resCmp(input_sql, stb_name)
- for t2 in ["-32768i16", "32768i16"]:
- input_sql = self.genFullTypeSql(t2=t2)[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
- #i32
- for t3 in ["-2147483647i32", "2147483647i32"]:
- input_sql, stb_name = self.genFullTypeSql(t3=t3)
- self.resCmp(input_sql, stb_name)
- for t3 in ["-2147483648i32", "2147483648i32"]:
- input_sql = self.genFullTypeSql(t3=t3)[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
- #i64
- for t4 in ["-9223372036854775807i64", "9223372036854775807i64"]:
- input_sql, stb_name = self.genFullTypeSql(t4=t4)
- self.resCmp(input_sql, stb_name)
- for t4 in ["-9223372036854775808i64", "9223372036854775808i64"]:
- input_sql = self.genFullTypeSql(t4=t4)[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
- # f32
- for t5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
- input_sql, stb_name = self.genFullTypeSql(t5=t5)
- self.resCmp(input_sql, stb_name)
- # * limit set to 4028234664*(10**38)
- for t5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
- input_sql = self.genFullTypeSql(t5=t5)[0]
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
+ self.resCmp(input_sql, stb_name)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+ for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]:
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
+ input_sql = 'Abcdffgg 1626006833640 False T1=127i8 id=Abcddd'
+ stb_name = "Abcdffgg"
+ self.resCmp(input_sql, stb_name)
- # f64
- for t6 in [f'{-1.79769*(10**308)}f64', f'{-1.79769*(10**308)}f64']:
- input_sql, stb_name = self.genFullTypeSql(t6=t6)
- self.resCmp(input_sql, stb_name)
- # * limit set to 1.797693134862316*(10**308)
- for t6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
- input_sql = self.genFullTypeSql(t6=t6)[0]
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
- # binary
+ def tagNameLengthCheckCase(self):
+ """
+ check tag name limit <= 62
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tag_name = tdCom.getLongName(61, "letters")
+ tag_name = f'T{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
- input_sql = f'{stb_name} 1626006833639000000ns t t0=t t1="{tdCom.getLongName(16374, "letters")}"'
- self._conn.schemaless_insert([input_sql], 1)
-
- input_sql = f'{stb_name} 1626006833639000000ns t t0=t t1="{tdCom.getLongName(16375, "letters")}"'
+ input_sql = f'{stb_name} 1626006833640 L"bcdaaa" {tag_name}=f'
+ self.resCmp(input_sql, stb_name)
+ input_sql = f'{stb_name} 1626006833640 L"gggcdaaa" {tdCom.getLongName(65, "letters")}=f'
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def tagValueLengthCheckCase(self):
+ """
+ check full type tag value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
# nchar
# * legal nchar could not be larger than 16374/4
stb_name = tdCom.getLongName(7, "letters")
- input_sql = f'{stb_name} 1626006833639000000ns t t0=t t1=L"{tdCom.getLongName(4093, "letters")}"'
- self._conn.schemaless_insert([input_sql], 1)
+ input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4093, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- input_sql = f'{stb_name} 1626006833639000000ns t t0=t t1=L"{tdCom.getLongName(4094, "letters")}"'
+ input_sql = f'{stb_name} 1626006833640 t t0=t t1={tdCom.getLongName(4094, "letters")}'
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -569,6 +576,7 @@ class TDTestCase:
"""
check full type col value limit
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
# i8
for value in ["-127i8", "127i8"]:
@@ -578,7 +586,7 @@ class TDTestCase:
for value in ["-128i8", "128i8"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -591,7 +599,7 @@ class TDTestCase:
for value in ["-32768i16", "32768i16"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -605,7 +613,7 @@ class TDTestCase:
for value in ["-2147483648i32", "2147483648i32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -619,7 +627,7 @@ class TDTestCase:
for value in ["-9223372036854775808i64", "9223372036854775808i64"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -634,7 +642,7 @@ class TDTestCase:
for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -649,7 +657,7 @@ class TDTestCase:
for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
input_sql = self.genFullTypeSql(value=value)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -657,13 +665,13 @@ class TDTestCase:
# # binary
tdCom.cleanTb()
stb_name = tdCom.getLongName(7, "letters")
- input_sql = f'{stb_name} 1626006833639000000ns "{tdCom.getLongName(16374, "letters")}" t0=t'
- self._conn.schemaless_insert([input_sql], 1)
+ input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
tdCom.cleanTb()
- input_sql = f'{stb_name} 1626006833639000000ns "{tdCom.getLongName(16375, "letters")}" t0=t'
+ input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -672,13 +680,13 @@ class TDTestCase:
# * legal nchar could not be larger than 16374/4
tdCom.cleanTb()
stb_name = tdCom.getLongName(7, "letters")
- input_sql = f'{stb_name} 1626006833639000000ns L"{tdCom.getLongName(4093, "letters")}" t0=t'
- self._conn.schemaless_insert([input_sql], 1)
+ input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
tdCom.cleanTb()
- input_sql = f'{stb_name} 1626006833639000000ns L"{tdCom.getLongName(4094, "letters")}" t0=t'
+ input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -688,6 +696,7 @@ class TDTestCase:
"""
test illegal tag col value
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
@@ -698,27 +707,15 @@ class TDTestCase:
# i8 i16 i32 i64 f32 f64
for input_sql in [
- self.genFullTypeSql(t1="1s2i8")[0],
- self.genFullTypeSql(t2="1s2i16")[0],
- self.genFullTypeSql(t3="1s2i32")[0],
- self.genFullTypeSql(t4="1s2i64")[0],
- self.genFullTypeSql(t5="11.1s45f32")[0],
- self.genFullTypeSql(t6="11.1s45f64")[0],
+ self.genFullTypeSql(value="1s2i8")[0],
+ self.genFullTypeSql(value="1s2i16")[0],
+ self.genFullTypeSql(value="1s2i32")[0],
+ self.genFullTypeSql(value="1s2i64")[0],
+ self.genFullTypeSql(value="11.1s45f32")[0],
+ self.genFullTypeSql(value="11.1s45f64")[0],
]:
try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
- # check binary and nchar blank
- input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc aaa" t0=t'
- input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abc aaa" t0=t'
- input_sql3 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0="abc aaa"'
- input_sql4 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=L"abc aaa"'
- for input_sql in [input_sql1, input_sql2, input_sql3, input_sql4]:
- try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -726,41 +723,36 @@ class TDTestCase:
# check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
- input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc{symbol}aaa" t0=t'
- input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=t t1="abc{symbol}aaa"'
- self._conn.schemaless_insert([input_sql1], 1)
- self._conn.schemaless_insert([input_sql2], 1)
+ input_sql1 = f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc{symbol}aaa" t0=t'
+ input_sql2 = f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=t t1="abc{symbol}aaa"'
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.TELNET.value, None)
+ self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.TELNET.value, None)
def blankCheckCase(self):
'''
check blank case
'''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
- input_sql_list = [f'{tdCom.getLongName(7, "letters")} {tdCom.getLongName(7, "letters")} 1626006833639000000ns "abcaaa" t0=t',
- f'{tdCom.getLongName(7, "letters")} 16260068336 39000000ns L"bcdaaa" t1=f',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0="abc aaa"',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns t t0=L"abc aaa"',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns "abc aaa" t0=L"abcaaa"',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abc aaa" t0=L"abcaaa"',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abaaa" t0=L"abcaaa1"',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abaaa" t0=L"abcaaa2"',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abaaa" t0=t t1="abc t2="taa""',
- f'{tdCom.getLongName(7, "letters")} 1626006833639000000ns L"abaaa" t0=L"abcaaa3"']
+ input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
+ f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
+ f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
+ f'{tdCom.getLongName(7, "letters")} 1626006833640 L"aba aa" t0=L"abcaaa3" ']
for input_sql in input_sql_list:
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ stb_name = input_sql.split(" ")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
def duplicateIdTagColInsertCheckCase(self):
"""
check duplicate Id Tag Col
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
- self._conn.schemaless_insert([input_sql_id], 1)
+ self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -768,7 +760,7 @@ class TDTestCase:
input_sql = self.genFullTypeSql()[0]
input_sql_tag = input_sql.replace("t5", "t6")
try:
- self._conn.schemaless_insert([input_sql_tag], 1)
+ self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -778,7 +770,7 @@ class TDTestCase:
"""
case no id when stb exist
"""
- print("noIdStbExistCheckCase")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
self.resCmp(input_sql, stb_name)
@@ -791,17 +783,18 @@ class TDTestCase:
"""
check duplicate insert when stb exist
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
self.resCmp(input_sql, stb_name)
def tagColBinaryNcharLengthCheckCase(self):
"""
check length increase
"""
- print("tagColBinaryNcharLengthCheckCase")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
@@ -818,26 +811,33 @@ class TDTestCase:
* col is added without value when update==0
* col is added with value when update==1
"""
- print("tagColAddDupIDCheckCase")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
self.createDb("test_update", db_update_tag=db_update_tag)
- input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", value="t")
self.resCmp(input_sql, stb_name)
- self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", value="f", t_add_tag=True)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", value="f", t_add_tag=True)
if db_update_tag == 1 :
- self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
- else:
self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
self.createDb()
def tagColAddCheckCase(self):
"""
check tag count add
"""
- print("tagColAddCheckCase")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
@@ -854,6 +854,7 @@ class TDTestCase:
condition: stb not change
insert two table, keep tag unchange, change col
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
@@ -865,58 +866,32 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.checkEqual(tb_name1, tb_name2)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, t0="f", value="f", id_noexist_tag=True, t_add_tag=True)
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
tb_name3 = self.getNoIdTbName(stb_name)
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
tdSql.checkNotEqual(tb_name1, tb_name3)
- # * tag binary max is 16384, col+ts binary max 49151
- def tagColBinaryMaxLengthCheckCase(self):
- """
- every binary and nchar must be length+2
- """
- tdCom.cleanTb()
- stb_name = tdCom.getLongName(7, "letters")
- tb_name = f'{stb_name}_1'
-
- input_sql = f'{stb_name} 1626006833639000000ns f id="{tb_name}" t0=t'
- self._conn.schemaless_insert([input_sql], 1)
-
- # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
- input_sql = f'{stb_name} 1626006833639000000ns f t0=t t1="{tdCom.getLongName(16374, "letters")}" t2="{tdCom.getLongName(5, "letters")}"'
- self._conn.schemaless_insert([input_sql], 1)
-
- tdSql.query(f"select * from {stb_name}")
- tdSql.checkRows(2)
- input_sql = f'{stb_name} 1626006833639000000ns f t0=t t1="{tdCom.getLongName(16374, "letters")}" t2="{tdCom.getLongName(6, "letters")}"'
- try:
- self._conn.schemaless_insert([input_sql], 1)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
- tdSql.query(f"select * from {stb_name}")
- tdSql.checkRows(2)
-
# * tag nchar max is 16374/4, col+ts nchar max 49151
def tagColNcharMaxLengthCheckCase(self):
"""
check nchar length limit
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
- input_sql = f'{stb_name} 1626006833639000000ns f id="{tb_name}" t0=t'
- self._conn.schemaless_insert([input_sql], 1)
+ input_sql = f'{stb_name} 1626006833640 f id={tb_name} t2={tdCom.getLongName(1, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
# * legal nchar could not be larger than 16374/4
- input_sql = f'{stb_name} 1626006833639000000ns f t0=t t1=L"{tdCom.getLongName(4093, "letters")}" t2=L"{tdCom.getLongName(1, "letters")}"'
- self._conn.schemaless_insert([input_sql], 1)
+ input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(1, "letters")}'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
- input_sql = f'{stb_name} 1626006833639000000ns f t0=t t1=L"{tdCom.getLongName(4093, "letters")}" t2=L"{tdCom.getLongName(2, "letters")}"'
+ input_sql = f'{stb_name} 1626006833640 f t1={tdCom.getLongName(4093, "letters")} t2={tdCom.getLongName(2, "letters")}'
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -927,21 +902,22 @@ class TDTestCase:
"""
test batch insert
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
- lines = ["st123456 1626006833639000000ns 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
- "st123456 1626006833640000000ns 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
- f'{stb_name} 1626056811823316532ns 3i64 t2=5f64 t3=L\"ste\"',
- "stf567890 1626006933640000000ns 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
- "st123456 1626006833642000000ns 5i64 t1=4i64 t2=5f64 t3=\"t4\"",
- f'{stb_name} 1626056811843316532ns 6i64 t2=5f64 t3=L\"ste2\"',
- f'{stb_name} 1626056812843316532ns 7i64 t2=5f64 t3=L\"ste2\"',
- "st123456 1626006933640000000ns 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
- "st123456 1626006933641000000ns 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64"
+ lines = ["st123456 1626006833640 1i64 t1=3i64 t2=4f64 t3=\"t3\"",
+ "st123456 1626006833641 2i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ f'{stb_name} 1626006833642 3i64 t2=5f64 t3=L\"ste\"',
+ "stf567890 1626006833643 4i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ "st123456 1626006833644 5i64 t1=4i64 t2=5f64 t3=\"t4\"",
+ f'{stb_name} 1626006833645 6i64 t2=5f64 t3=L\"ste2\"',
+ f'{stb_name} 1626006833646 7i64 t2=5f64 t3=L\"ste2\"',
+ "st123456 1626006833647 8i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64",
+ "st123456 1626006833648 9i64 t1=4i64 t3=\"t4\" t2=5f64 t4=5f64"
]
- self._conn.schemaless_insert(lines, 1)
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None)
tdSql.query('show stables')
tdSql.checkRows(3)
tdSql.query('show tables')
@@ -950,30 +926,32 @@ class TDTestCase:
tdSql.checkRows(5)
def multiInsertCheckCase(self, count):
- """
- test multi insert
- """
- tdCom.cleanTb()
- sql_list = []
- stb_name = tdCom.getLongName(8, "letters")
- tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
- for i in range(count):
- input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
- sql_list.append(input_sql)
- self._conn.schemaless_insert(sql_list, 1)
- tdSql.query('show tables')
- tdSql.checkRows(count)
+ """
+ test multi insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ sql_list = []
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
+ for i in range(count):
+ input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', value=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
+ sql_list.append(input_sql)
+ self._conn.schemaless_insert(sql_list, TDSmlProtocolType.TELNET.value, None)
+ tdSql.query('show tables')
+ tdSql.checkRows(count)
def batchErrorInsertCheckCase(self):
"""
test batch error insert
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
stb_name = tdCom.getLongName(8, "letters")
- lines = ["st123456 1626006833639000000ns 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
+ lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
try:
- self._conn.schemaless_insert(lines, 1)
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -982,10 +960,11 @@ class TDTestCase:
"""
test multi cols insert
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
- input_sql = self.genFullTypeSql(t_multi_tag=True)[0]
+ input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -994,10 +973,11 @@ class TDTestCase:
"""
test blank col insert
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -1006,10 +986,11 @@ class TDTestCase:
"""
test blank tag insert
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -1018,6 +999,7 @@ class TDTestCase:
"""
check nchar ---> chinese
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
self.resCmp(input_sql, stb_name)
@@ -1026,51 +1008,69 @@ class TDTestCase:
'''
multi_field
'''
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(multi_field_tag=True)[0]
try:
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- def errorTypeCheckCase(self):
+ def spellCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
stb_name = tdCom.getLongName(8, "letters")
- input_sql_list = [f'{stb_name}_1 1626006833639000000Ns "hkgjiwdj" t0=f t1=127I8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_2 1626006833639000001nS "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_3 1626006833639000002NS "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_4 1626006833639019Us "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647I32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_5 1626006833639018uS "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807I64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_6 1626006833639017US "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807I64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_7 1626006833640Ms "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789F64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_8 1626006833641mS "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_9 1626006833642MS "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_10 1626006834S "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=l"ncharTagValue"', \
- f'{stb_name}_11 1626006834S "hkgjiwdj" t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"']
+ input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_3 1626006833640 2147483647I32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_4 1626006833640 9223372036854775807I64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_5 1626006833640 11.12345027923584F32 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_6 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_7 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_8 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_9 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
+ f'{stb_name}_10 1626006833640 22.123456789F64 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64']
for input_sql in input_sql_list:
- stb_name = input_sql.split(" ")[0]
+ stb_name = input_sql.split(' ')[0]
self.resCmp(input_sql, stb_name)
def pointTransCheckCase(self):
"""
metric value "." trans to "_"
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genFullTypeSql(point_trans_tag=True)[0]
- stb_name = input_sql.split(" ")[0].replace(".", "_")
+ stb_name = f'`{input_sql.split(" ")[0]}`'
self.resCmp(input_sql, stb_name)
+ tdSql.execute("drop table `.point.trans.test`")
def defaultTypeCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
stb_name = tdCom.getLongName(8, "letters")
- input_sql_list = [f'{stb_name}_1 1626006833639000000Ns 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_2 1626006834S 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_3 1626006834S 10e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_4 1626006834S 10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5 t7="vozamcts" t8=L"ncharTagValue"', \
- f'{stb_name}_5 1626006834S -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"']
+ input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_3 1626006833642 10e5F32 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10e5F64 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_4 1626006833643 10.0e5F64 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=10.0e5F32 t7="vozamcts" t8=L"ncharTagValue"', \
+ f'{stb_name}_5 1626006833644 -10.0e5 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=-10.0e5 t7="vozamcts" t8=L"ncharTagValue"']
for input_sql in input_sql_list:
stb_name = input_sql.split(" ")[0]
self.resCmp(input_sql, stb_name)
+ def tbnameTagsColsNameCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
+ query_sql = 'select * from `rfa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 33, 54), 9.223372036854776e+18, 'true', '127Ii8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['ts', 'value', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9'])
+ tdSql.execute('drop table `rfa$sta`')
+
def genSqlList(self, count=5, stb_name="", tb_name=""):
"""
stb --> supertable
@@ -1120,7 +1120,7 @@ class TDTestCase:
def genMultiThreadSeq(self, sql_list):
tlist = list()
for insert_sql in sql_list:
- t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]],1))
+ t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.TELNET.value, None))
tlist.append(t)
return tlist
@@ -1134,6 +1134,7 @@ class TDTestCase:
"""
thread input different stb
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql = self.genSqlList()[0]
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
@@ -1144,6 +1145,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, result keep first data
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
@@ -1161,6 +1163,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, add columes and tags, result keep first data
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
@@ -1178,6 +1181,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
@@ -1195,6 +1199,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1207,14 +1212,15 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add col, mul tag
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
- s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833639000000ns "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
- (f'{stb_name} 1626006833639000000ns "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
- (f'{stb_name} 1626006833639000000ns "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
- (f'{stb_name} 1626006833639000000ns "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
- (f'{stb_name} 1626006833639000000ns "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')]
+ s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "vqowydbc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "plgkckpv" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "cujyqvlj" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
+ (f'{stb_name} 1626006833640 "twjxisat" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_m_tag_list))
tdSql.query(f"show tables;")
tdSql.checkRows(3)
@@ -1223,6 +1229,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add tag, mul col
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1235,15 +1242,16 @@ class TDTestCase:
"""
thread input same stb tb, different ts
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
- s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id="{tb_name}" t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \
- (f'{stb_name} 0 "rljjrrul" id="{tb_name}" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \
- (f'{stb_name} 0 "basanglx" id="{tb_name}" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \
- (f'{stb_name} 0 "clsajzpp" id="{tb_name}" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \
- (f'{stb_name} 0 "jitwseso" id="{tb_name}" t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')]
+ s_stb_s_tb_d_ts_list = [(f'{stb_name} 0 "hkgjiwdj" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "rljjrrul" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="bmcanhbs" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "basanglx" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enqkyvmb" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "clsajzpp" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="eivaegjk" t8=L"ncharTagValue"', 'dwpthv'), \
+ (f'{stb_name} 0 "jitwseso" id={tb_name} t0=T t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="yhlwkddq" t8=L"ncharTagValue"', 'dwpthv')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
tdSql.query(f"show tables;")
tdSql.checkRows(1)
@@ -1254,6 +1262,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add col, mul tag
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
@@ -1271,15 +1280,16 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add tag, mul col
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
- s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id="{tb_name}" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
- (f'{stb_name} 0 "yqeztggb" id="{tb_name}" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
- (f'{stb_name} 0 "gbkinqdk" id="{tb_name}" t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
- (f'{stb_name} 0 "ldxxejbd" id="{tb_name}" t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
- (f'{stb_name} 0 "tlvzwjes" id="{tb_name}" t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')]
+ s_stb_s_tb_d_ts_a_tag_list = [(f'{stb_name} 0 "clummqfy" id={tb_name} t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="hpxzrdiw" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "yqeztggb" id={tb_name} t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="gdtblmrc" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "gbkinqdk" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="iqniuvco" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "ldxxejbd" id={tb_name} t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="vxkipags" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl'), \
+ (f'{stb_name} 0 "tlvzwjes" id={tb_name} t0=true t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64 t7="enwrlrtj" t8=L"ncharTagValue" t11=127i8 t10=L"ncharTagValue"', 'bokaxl')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_list))
tdSql.query(f"show tables;")
tdSql.checkRows(1)
@@ -1293,6 +1303,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1305,22 +1316,23 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts, add col, mul tag
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
- s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=f t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
- (f'{stb_name} 0 "zbvwckcd" t0=True t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
- (f'{stb_name} 0 "vymcjfwc" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
- (f'{stb_name} 0 "laumkwfn" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
- (f'{stb_name} 0 "nyultzxr" t0=false t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')]
+ s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "zbvwckcd" t0=True t1=126i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "vymcjfwc" t0=False t1=125i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "laumkwfn" t0=False t1=124i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
+ (f'{stb_name} 0 "nyultzxr" t0=false t1=123i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_m_tag_list))
tdSql.query(f"show tables;")
- tdSql.checkRows(3)
+ tdSql.checkRows(6)
def test(self):
try:
input_sql = f'test_nchar 0 L"涛思数据" t0=f t1=L"涛思数据" t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64'
- self._conn.schemaless_insert([input_sql], 1)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
except SchemalessError as err:
print(err.errno)
@@ -1329,15 +1341,18 @@ class TDTestCase:
self.boolTypeCheckCase()
self.symbolsCheckCase()
self.tsCheckCase()
+ self.openTstbTelnetTsCheckCase()
self.idSeqCheckCase()
- self.idUpperCheckCase()
+ self.idLetterCheckCase()
self.noIdCheckCase()
self.maxColTagCheckCase()
- self.idIllegalNameCheckCase()
+ self.stbTbNameCheckCase()
self.idStartWithNumCheckCase()
self.nowTsCheckCase()
self.dateFormatTsCheckCase()
self.illegalTsCheckCase()
+ self.tbnameCheckCase()
+ self.tagNameLengthCheckCase()
self.tagValueLengthCheckCase()
self.colValueLengthCheckCase()
self.tagColIllegalValueCheckCase()
@@ -1349,7 +1364,6 @@ class TDTestCase:
self.tagColAddDupIDCheckCase()
self.tagColAddCheckCase()
self.tagMd5Check()
- self.tagColBinaryMaxLengthCheckCase()
self.tagColNcharMaxLengthCheckCase()
self.batchInsertCheckCase()
self.multiInsertCheckCase(10)
@@ -1359,10 +1373,11 @@ class TDTestCase:
self.blankTagInsertCheckCase()
self.chineseCheckCase()
self.multiFieldCheckCase()
- self.errorTypeCheckCase()
+ self.spellCheckCase()
self.pointTransCheckCase()
self.defaultTypeCheckCase()
- # # MultiThreads
+ self.tbnameTagsColsNameCheckCase()
+ # # # MultiThreads
self.stbInsertMultiThreadCheckCase()
self.sStbStbDdataInsertMultiThreadCheckCase()
self.sStbStbDdataAtInsertMultiThreadCheckCase()
@@ -1381,7 +1396,6 @@ class TDTestCase:
self.createDb()
try:
self.runAll()
- # self.test()
except Exception as err:
print(''.join(traceback.format_exception(None, err, err.__traceback__)))
raise err
diff --git a/tests/pytest/insert/schemalessInsert.py b/tests/pytest/insert/schemalessInsert.py
index 94ea0ab79a54cbb7daea1a431fa566567b9de684..dc8528f37215af3b73e351d1255450954d0d2b07 100644
--- a/tests/pytest/insert/schemalessInsert.py
+++ b/tests/pytest/insert/schemalessInsert.py
@@ -13,18 +13,15 @@
import traceback
import random
-import string
from taos.error import SchemalessError
import time
-from copy import deepcopy
import numpy as np
from util.log import *
from util.cases import *
from util.sql import *
+from util.common import tdCom
from util.types import TDSmlProtocolType, TDSmlTimestampType
import threading
-
-
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -40,32 +37,19 @@ class TDTestCase:
tdSql.execute(f"create database if not exists {name} precision 'us' update 1")
tdSql.execute(f'use {name}')
- def getLongName(self, len, mode = "mixed"):
- """
- generate long name
- mode could be numbers/letters/mixed
- """
- if mode is "numbers":
- chars = ''.join(random.choice(string.digits) for i in range(len))
- elif mode is "letters":
- chars = ''.join(random.choice(string.ascii_letters.lower()) for i in range(len))
- else:
- chars = ''.join(random.choice(string.ascii_letters.lower() + string.digits) for i in range(len))
- return chars
-
- def timeTrans(self, time_value):
- if time_value.endswith("ns"):
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000000
- elif time_value.endswith("us") or time_value.isdigit() and int(time_value) != 0:
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000
- elif time_value.endswith("ms"):
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1000
- elif time_value.endswith("s") and list(time_value)[-1] not in "num":
- ts = int(''.join(list(filter(str.isdigit, time_value))))/1
- elif int(time_value) == 0:
+ def timeTrans(self, time_value, ts_type):
+ # TDSmlTimestampType.HOUR.value, TDSmlTimestampType.MINUTE.value, TDSmlTimestampType.SECOND.value, TDSmlTimestampType.MICRO_SECOND.value, TDSmlTimestampType.NANO_SECOND.value
+ if int(time_value) == 0:
ts = time.time()
else:
- print("input ts maybe not right format")
+ if ts_type == TDSmlTimestampType.NANO_SECOND.value or ts_type is None:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000000
+ elif ts_type == TDSmlTimestampType.MICRO_SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1000000
+ elif ts_type == TDSmlTimestampType.MILLI_SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1000
+ elif ts_type == TDSmlTimestampType.SECOND.value:
+ ts = int(''.join(list(filter(str.isdigit, time_value))))/1
ulsec = repr(ts).split('.')[1][:6]
if len(ulsec) < 6 and int(ulsec) != 0:
ulsec = int(ulsec) * (10 ** (6 - len(ulsec)))
@@ -82,44 +66,59 @@ class TDTestCase:
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
- def getTdTypeValue(self, value):
- if value.endswith("i8"):
- td_type = "TINYINT"
- td_tag_value = ''.join(list(value)[:-2])
- elif value.endswith("i16"):
- td_type = "SMALLINT"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.endswith("i32"):
- td_type = "INT"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.endswith("i64"):
- td_type = "BIGINT"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.endswith("u64"):
- td_type = "BIGINT UNSIGNED"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.endswith("f32"):
- td_type = "FLOAT"
- td_tag_value = ''.join(list(value)[:-3])
- td_tag_value = '{}'.format(np.float32(td_tag_value))
- elif value.endswith("f64"):
- td_type = "DOUBLE"
- td_tag_value = ''.join(list(value)[:-3])
- elif value.startswith('L"'):
+ def getTdTypeValue(self, value, vtype="col"):
+ """
+ vtype must be col or tag
+ """
+ if vtype == "col":
+ if value.lower().endswith("i8"):
+ td_type = "TINYINT"
+ td_tag_value = ''.join(list(value)[:-2])
+ elif value.lower().endswith("i16"):
+ td_type = "SMALLINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i32"):
+ td_type = "INT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("i64"):
+ td_type = "BIGINT"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().lower().endswith("u64"):
+ td_type = "BIGINT UNSIGNED"
+ td_tag_value = ''.join(list(value)[:-3])
+ elif value.lower().endswith("f32"):
+ td_type = "FLOAT"
+ td_tag_value = ''.join(list(value)[:-3])
+ td_tag_value = '{}'.format(np.float32(td_tag_value))
+ elif value.lower().endswith("f64"):
+ td_type = "DOUBLE"
+ td_tag_value = ''.join(list(value)[:-3])
+ if "e" in value.lower():
+ td_tag_value = str(float(td_tag_value))
+ elif value.lower().startswith('l"'):
+ td_type = "NCHAR"
+ td_tag_value = ''.join(list(value)[2:-1])
+ elif value.startswith('"') and value.endswith('"'):
+ td_type = "BINARY"
+ td_tag_value = ''.join(list(value)[1:-1])
+ elif value.lower() == "t" or value.lower() == "true":
+ td_type = "BOOL"
+ td_tag_value = "True"
+ elif value.lower() == "f" or value.lower() == "false":
+ td_type = "BOOL"
+ td_tag_value = "False"
+ elif value.isdigit():
+ td_type = "DOUBLE"
+ td_tag_value = str(float(value))
+ else:
+ td_type = "DOUBLE"
+ if "e" in value.lower():
+ td_tag_value = str(float(value))
+ else:
+ td_tag_value = value
+ elif vtype == "tag":
td_type = "NCHAR"
- td_tag_value = ''.join(list(value)[2:-1])
- elif value.startswith('"') and value.endswith('"'):
- td_type = "BINARY"
- td_tag_value = ''.join(list(value)[1:-1])
- elif value.lower() == "t" or value == "true" or value == "True":
- td_type = "BOOL"
- td_tag_value = "True"
- elif value.lower() == "f" or value == "false" or value == "False":
- td_type = "BOOL"
- td_tag_value = "False"
- else:
- td_type = "FLOAT"
- td_tag_value = value
+ td_tag_value = str(value)
return td_type, td_tag_value
def typeTrans(self, type_list):
@@ -149,12 +148,12 @@ class TDTestCase:
type_num_list.append(14)
return type_num_list
- def inputHandle(self, input_sql):
+ def inputHandle(self, input_sql, ts_type):
input_sql_split_list = input_sql.split(" ")
stb_tag_list = input_sql_split_list[0].split(',')
stb_col_list = input_sql_split_list[1].split(',')
- ts_value = self.timeTrans(input_sql_split_list[2])
+ ts_value = self.timeTrans(input_sql_split_list[2], ts_type)
stb_name = stb_tag_list[0]
stb_tag_list.pop(0)
@@ -173,11 +172,11 @@ class TDTestCase:
if "id=" in elm.lower():
tb_name = elm.split('=')[1]
else:
- tag_name_list.append(elm.split("=")[0])
+ tag_name_list.append(elm.split("=")[0].lower())
tag_value_list.append(elm.split("=")[1])
tb_name = ""
- td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1])[1])
- td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1])[0])
+ td_tag_value_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[1])
+ td_tag_type_list.append(self.getTdTypeValue(elm.split("=")[1], "tag")[0])
for elm in stb_col_list:
col_name_list.append(elm.split("=")[0])
@@ -205,43 +204,58 @@ class TDTestCase:
t4="9223372036854775807i64", t5="11.12345f32", t6="22.123456789f64", t7="\"binaryTagValue\"",
t8="L\"ncharTagValue\"", c0="", c1="127i8", c2="32767i16", c3="2147483647i32",
c4="9223372036854775807i64", c5="11.12345f32", c6="22.123456789f64", c7="\"binaryColValue\"",
- c8="L\"ncharColValue\"", c9="7u64", ts="1626006833639000000ns",
- id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_double_tag=None,
- ct_add_tag=None, ct_am_tag=None, ct_ma_tag=None, ct_min_tag=None):
+ c8="L\"ncharColValue\"", c9="7u64", ts="1626006833639000000", ts_type=None,
+ id_noexist_tag=None, id_change_tag=None, id_upper_tag=None, id_mixul_tag=None, id_double_tag=None,
+ ct_add_tag=None, ct_am_tag=None, ct_ma_tag=None, ct_min_tag=None, c_multi_tag=None, t_multi_tag=None,
+ c_blank_tag=None, t_blank_tag=None, chinese_tag=None):
if stb_name == "":
- stb_name = self.getLongName(len=6, mode="letters")
+ stb_name = tdCom.getLongName(len=6, mode="letters")
if tb_name == "":
tb_name = f'{stb_name}_{random.randint(0, 65535)}_{random.randint(0, 65535)}'
if t0 == "":
- t0 = random.choice(["f", "F", "false", "False", "t", "T", "true", "True"])
+ t0 = "t"
if c0 == "":
c0 = random.choice(["f", "F", "false", "False", "t", "T", "true", "True"])
- #sql_seq = f'{stb_name},id=\"{tb_name}\",t0={t0},t1=127i8,t2=32767i16,t3=125.22f64,t4=11.321f32,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0={bool_value},c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryValue\",c8=L\"ncharValue\" 1626006833639000000ns'
+ #sql_seq = f'{stb_name},id=\"{tb_name}\",t0={t0},t1=127i8,t2=32767i16,t3=125.22f64,t4=11.321f32,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\" c0={bool_value},c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryValue\",c8=L\"ncharValue\" 1626006833639000000'
if id_upper_tag is not None:
id = "ID"
else:
id = "id"
- sql_seq = f'{stb_name},{id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if id_mixul_tag is not None:
+ id = random.choice(["iD", "Id"])
+ else:
+ id = "id"
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
if id_noexist_tag is not None:
sql_seq = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
if ct_add_tag is not None:
sql_seq = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t9={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
if id_change_tag is not None:
- sql_seq = f'{stb_name},t0={t0},t1={t1},{id}=\"{tb_name}\",t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ sql_seq = f'{stb_name},t0={t0},t1={t1},{id}={tb_name},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
if id_double_tag is not None:
sql_seq = f'{stb_name},{id}=\"{tb_name}_1\",t0={t0},t1={t1},{id}=\"{tb_name}_2\",t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
if ct_add_tag is not None:
- sql_seq = f'{stb_name},{id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
if ct_am_tag is not None:
- sql_seq = f'{stb_name},{id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
if id_noexist_tag is not None:
sql_seq = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9},c11={c8},c10={t0} {ts}'
if ct_ma_tag is not None:
- sql_seq = f'{stb_name},{id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
if id_noexist_tag is not None:
sql_seq = f'{stb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8},t11={t1},t10={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
if ct_min_tag is not None:
- sql_seq = f'{stb_name},{id}=\"{tb_name}\",t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6} {ts}'
+ if c_multi_tag is not None:
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} c10={c9} {ts}'
+ if t_multi_tag is not None:
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} t9={t8} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if c_blank_tag is not None:
+ sql_seq = f'{stb_name},{id}={tb_name},t0={t0},t1={t1},t2={t2},t3={t3},t4={t4},t5={t5},t6={t6},t7={t7},t8={t8} {ts}'
+ if t_blank_tag is not None:
+ sql_seq = f'{stb_name},{id}={tb_name} c0={c0},c1={c1},c2={c2},c3={c3},c4={c4},c5={c5},c6={c6},c7={c7},c8={c8},c9={c9} {ts}'
+ if chinese_tag is not None:
+ sql_seq = f'{stb_name},to=L"涛思数据" c0=L"涛思数据" {ts}'
return sql_seq, stb_name
def genMulTagColStr(self, genType, count):
@@ -266,12 +280,12 @@ class TDTestCase:
return col_str
def genLongSql(self, tag_count, col_count):
- stb_name = self.getLongName(7, mode="letters")
+ stb_name = tdCom.getLongName(7, mode="letters")
tb_name = f'{stb_name}_1'
tag_str = self.genMulTagColStr("tag", tag_count)
col_str = self.genMulTagColStr("col", col_count)
- ts = "1626006833640000000ns"
- long_sql = stb_name + ',' + f'id=\"{tb_name}\"' + ',' + tag_str + col_str + ts
+ ts = "1626006833640000000"
+ long_sql = stb_name + ',' + f'id={tb_name}' + ',' + tag_str + col_str + ts
return long_sql, stb_name
def getNoIdTbName(self, stb_name):
@@ -293,9 +307,12 @@ class TDTestCase:
res_type_list = col_info[1]
return res_row_list, res_field_list_without_ts, res_type_list
- def resCmp(self, input_sql, stb_name, query_sql="select * from", condition="", ts=None, id=True, none_check_tag=None):
- expect_list = self.inputHandle(input_sql)
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ def resCmp(self, input_sql, stb_name, query_sql="select * from", ts_type=None, condition="", ts=None, id=True, none_check_tag=None, precision=None):
+ expect_list = self.inputHandle(input_sql, ts_type)
+ if precision == None:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, ts_type)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, precision)
query_sql = f"{query_sql} {stb_name} {condition}"
res_row_list, res_field_list_without_ts, res_type_list = self.resHandle(query_sql, True)
if ts == 0:
@@ -330,7 +347,8 @@ class TDTestCase:
"""
normal tags and cols, one for every elm
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
@@ -338,7 +356,8 @@ class TDTestCase:
"""
check all normal type
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
@@ -352,7 +371,8 @@ class TDTestCase:
please test :
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\"'
nchar_symbols = f'L{binary_symbols}'
input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols)
@@ -360,32 +380,161 @@ class TDTestCase:
def tsCheckCase(self):
"""
- test ts list --> ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
+ test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
# ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
"""
- self.cleanStb()
- ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
- for ts in ts_list:
- input_sql, stb_name = self.genFullTypeSql(ts=ts)
- self.resCmp(input_sql, stb_name, ts=ts)
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833639000000)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.NANO_SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833639019)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MICRO_SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006834)
+ self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.SECOND.value)
+ input_sql, stb_name = self.genFullTypeSql(ts=1626006833639000000)
+ self.resCmp(input_sql, stb_name, ts_type=None)
+ input_sql, stb_name = self.genFullTypeSql(ts=0)
+ self.resCmp(input_sql, stb_name, ts=0)
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ms'")
+ tdSql.execute("use test_ts")
+ input_sql = ['test_ms,t0=t c0=t 1626006833640', 'test_ms,t0=t c0=f 1626006833641']
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.LINE.value, TDSmlTimestampType.MILLI_SECOND.value)
+ res = tdSql.query('select * from test_ms', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.640000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.641000")
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'us'")
+ tdSql.execute("use test_ts")
+ input_sql = ['test_us,t0=t c0=t 1626006833639000', 'test_us,t0=t c0=f 1626006833639001']
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.LINE.value, TDSmlTimestampType.MICRO_SECOND.value)
+ res = tdSql.query('select * from test_us', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.639000")
+ tdSql.checkEqual(str(res[1][0]), "2021-07-11 20:33:53.639001")
+
+ tdSql.execute(f"drop database if exists test_ts")
+ tdSql.execute(f"create database if not exists test_ts precision 'ns'")
+ tdSql.execute("use test_ts")
+ input_sql = ['test_ns,t0=t c0=t 1626006833639000000', 'test_ns,t0=t c0=f 1626006833639000001']
+ self._conn.schemaless_insert(input_sql, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ res = tdSql.query('select * from test_ns', True)
+ tdSql.checkEqual(str(res[0][0]), "1626006833639000000")
+ tdSql.checkEqual(str(res[1][0]), "1626006833639000001")
+
+ self.createDb()
+
+ def zeroTsCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ for ts_tag in [TDSmlTimestampType.HOUR.value, TDSmlTimestampType.MINUTE.value, TDSmlTimestampType.SECOND.value, TDSmlTimestampType.MICRO_SECOND.value, TDSmlTimestampType.NANO_SECOND.value]:
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 0'
+ stb_name = input_sql.split(",")[0]
+ self.resCmp(input_sql, stb_name, ts=0, precision=ts_tag)
+ def influxTsCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 454093'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.HOUR.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 21:00:00")
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 454094'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.HOUR.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 22:00:00")
+
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 27245538'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MINUTE.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:18:00")
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 27245539'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MINUTE.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:19:00")
+
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1634731694'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:08:14")
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1634731695'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:08:15")
+
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1634731684002'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MILLI_SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:08:04.002000")
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1634731684003'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MILLI_SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:08:04.003000")
+
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1634731684000001'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MICRO_SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:08:04.000001")
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1634731684000002'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.MICRO_SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-10-20 20:08:04.000002")
+
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:33:53.639000")
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1626007833639000000'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ res = tdSql.query(f'select * from {stb_name}', True)
+ tdSql.checkEqual(str(res[0][0]), "2021-07-11 20:50:33.639000")
+
+ def iuCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = f'{tdCom.getLongName(len=10, mode="letters")},t0=127 c1=9223372036854775807i,c2=1u 0'
+ stb_name = input_sql.split(",")[0]
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ tdSql.query(f'describe {stb_name}', True)
+ tdSql.checkData(1, 1, "BIGINT")
+ tdSql.checkData(2, 1, "BIGINT UNSIGNED")
+
def idSeqCheckCase(self):
"""
check id.index in tags
eg: t0=**,id=**,t1=**
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name)
- def idUpperCheckCase(self):
+ def idLetterCheckCase(self):
"""
check id param
eg: id and ID
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
self.resCmp(input_sql, stb_name)
+ input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True)
+ self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
self.resCmp(input_sql, stb_name)
@@ -393,7 +542,8 @@ class TDTestCase:
"""
id not exist
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -406,171 +556,119 @@ class TDTestCase:
def maxColTagCheckCase(self):
"""
max tag count is 128
- max col count is ??
+ max col count is 4096
"""
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_sql in [self.genLongSql(128, 1)[0], self.genLongSql(1, 4094)[0]]:
- self.cleanStb()
+ tdCom.cleanTb()
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]:
- self.cleanStb()
+ tdCom.cleanTb()
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
- def idIllegalNameCheckCase(self):
+ def stbTbNameCheckCase(self):
"""
test illegal id name
- mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
+ mix "~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
"""
- self.cleanStb()
- rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ rstr = list("~!@#$¥%^&*()-+=|[]、「」【】\;:《》<>?")
for i in rstr:
- input_sql = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"")[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
+ stb_name=f"aaa{i}bbb"
+ input_sql = self.genFullTypeSql(stb_name=stb_name, tb_name=f'{stb_name}_sub')[0]
+ self.resCmp(input_sql, f'`{stb_name}`')
+ tdSql.execute(f'drop table if exists `{stb_name}`')
def idStartWithNumCheckCase(self):
"""
id is start with num
"""
- self.cleanStb()
- input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb")
+ self.resCmp(input_sql, stb_name)
def nowTsCheckCase(self):
"""
check now unsupported
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
def dateFormatTsCheckCase(self):
"""
check date format ts unsupported
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
def illegalTsCheckCase(self):
"""
check ts format like 16260068336390us19
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
- def tagValueLengthCheckCase(self):
+ def tbnameCheckCase(self):
"""
- check full type tag value limit
+ check length 192
+ check upper tbname
+ chech upper tag
+ length of stb_name tb_name <= 192
"""
- self.cleanStb()
- # i8
- for t1 in ["-127i8", "127i8"]:
- input_sql, stb_name = self.genFullTypeSql(t1=t1)
- self.resCmp(input_sql, stb_name)
- for t1 in ["-128i8", "128i8"]:
- input_sql = self.genFullTypeSql(t1=t1)[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
-
- #i16
- for t2 in ["-32767i16", "32767i16"]:
- input_sql, stb_name = self.genFullTypeSql(t2=t2)
- self.resCmp(input_sql, stb_name)
- for t2 in ["-32768i16", "32768i16"]:
- input_sql = self.genFullTypeSql(t2=t2)[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
-
- #i32
- for t3 in ["-2147483647i32", "2147483647i32"]:
- input_sql, stb_name = self.genFullTypeSql(t3=t3)
- self.resCmp(input_sql, stb_name)
- for t3 in ["-2147483648i32", "2147483648i32"]:
- input_sql = self.genFullTypeSql(t3=t3)[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
-
- #i64
- for t4 in ["-9223372036854775807i64", "9223372036854775807i64"]:
- input_sql, stb_name = self.genFullTypeSql(t4=t4)
- self.resCmp(input_sql, stb_name)
- for t4 in ["-9223372036854775808i64", "9223372036854775808i64"]:
- input_sql = self.genFullTypeSql(t4=t4)[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- except SchemalessError:
- pass
-
- # f32
- for t5 in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
- input_sql, stb_name = self.genFullTypeSql(t5=t5)
- self.resCmp(input_sql, stb_name)
- # * limit set to 4028234664*(10**38)
- for t5 in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
- input_sql = self.genFullTypeSql(t5=t5)[0]
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
-
-
- # f64
- for t6 in [f'{-1.79769*(10**308)}f64', f'{-1.79769*(10**308)}f64']:
- input_sql, stb_name = self.genFullTypeSql(t6=t6)
- self.resCmp(input_sql, stb_name)
- # * limit set to 1.797693134862316*(10**308)
- for c6 in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
- input_sql = self.genFullTypeSql(c6=c6)[0]
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ tb_name_192 = tdCom.getLongName(len=192, mode="letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
+ self.resCmp(input_sql, stb_name)
+ tdSql.query(f'select * from {stb_name}')
+ tdSql.checkRows(1)
+ for input_sql in [self.genFullTypeSql(stb_name=tdCom.getLongName(len=193, mode="letters"), tb_name=tdCom.getLongName(len=5, mode="letters"))[0], self.genFullTypeSql(tb_name=tdCom.getLongName(len=193, mode="letters"))[0]]:
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
- # binary
- stb_name = self.getLongName(7, "letters")
- input_sql = f'{stb_name},t0=t,t1="{self.getLongName(16374, "letters")}" c0=f 1626006833639000000ns'
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
- input_sql = f'{stb_name},t0=t,t1="{self.getLongName(16375, "letters")}" c0=f 1626006833639000000ns'
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- raise Exception("should not reach here")
- except SchemalessError as err:
- pass
+ input_sql = 'Abcdffgg,id=Abcddd,T1=127i8 c0=False 1626006833639000000'
+ stb_name = "Abcdffgg"
+ self.resCmp(input_sql, stb_name)
+ def tagValueLengthCheckCase(self):
+ """
+ check full type tag value limit
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
# nchar
# * legal nchar could not be larger than 16374/4
- stb_name = self.getLongName(7, "letters")
- input_sql = f'{stb_name},t0=t,t1=L"{self.getLongName(4093, "letters")}" c0=f 1626006833639000000ns'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name},t0=t,t1={tdCom.getLongName(4093, "letters")} c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
- input_sql = f'{stb_name},t0=t,t1=L"{self.getLongName(4094, "letters")}" c0=f 1626006833639000000ns'
+ input_sql = f'{stb_name},t0=t,t1={tdCom.getLongName(4094, "letters")} c0=f 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
@@ -581,7 +679,8 @@ class TDTestCase:
"""
check full type col value limit
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
# i8
for c1 in ["-127i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(c1=c1)
@@ -657,11 +756,10 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# # binary
- stb_name = self.getLongName(7, "letters")
- input_sql = f'{stb_name},t0=t c0=f,c1="{self.getLongName(16374, "letters")}" 1626006833639000000ns'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}" 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
- input_sql = f'{stb_name},t0=t c0=f,c1="{self.getLongName(16375, "letters")}" 1626006833639000000ns'
+ input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16375, "letters")}" 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
@@ -670,11 +768,11 @@ class TDTestCase:
# nchar
# * legal nchar could not be larger than 16374/4
- stb_name = self.getLongName(7, "letters")
- input_sql = f'{stb_name},t0=t c0=f,c1=L"{self.getLongName(4093, "letters")}" 1626006833639000000ns'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4093, "letters")}" 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- input_sql = f'{stb_name},t0=t c0=f,c1=L"{self.getLongName(4094, "letters")}" 1626006833639000000ns'
+ input_sql = f'{stb_name},t0=t c0=f,c1=L"{tdCom.getLongName(4094, "letters")}" 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
@@ -686,30 +784,17 @@ class TDTestCase:
"""
test illegal tag col value
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
- input_sql1 = self.genFullTypeSql(t0=i)[0]
- try:
- self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
- input_sql2 = self.genFullTypeSql(c0=i)[0]
- try:
- self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ input_sql1, stb_name = self.genFullTypeSql(t0=i)
+ self.resCmp(input_sql1, stb_name)
+ input_sql2, stb_name = self.genFullTypeSql(c0=i)
+ self.resCmp(input_sql2, stb_name)
# i8 i16 i32 i64 f32 f64
for input_sql in [
- self.genFullTypeSql(t1="1s2i8")[0],
- self.genFullTypeSql(t2="1s2i16")[0],
- self.genFullTypeSql(t3="1s2i32")[0],
- self.genFullTypeSql(t4="1s2i64")[0],
- self.genFullTypeSql(t5="11.1s45f32")[0],
- self.genFullTypeSql(t6="11.1s45f64")[0],
self.genFullTypeSql(c1="1s2i8")[0],
self.genFullTypeSql(c2="1s2i16")[0],
self.genFullTypeSql(c3="1s2i32")[0],
@@ -725,35 +810,31 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# check binary and nchar blank
- stb_name = self.getLongName(7, "letters")
- input_sql1 = f'{stb_name},t0=t c0=f,c1="abc aaa" 1626006833639000000ns'
- input_sql2 = f'{stb_name},t0=t c0=f,c1=L"abc aaa" 1626006833639000000ns'
- input_sql3 = f'{stb_name},t0=t,t1="abc aaa" c0=f 1626006833639000000ns'
- input_sql4 = f'{stb_name},t0=t,t1=L"abc aaa" c0=f 1626006833639000000ns'
+ stb_name = tdCom.getLongName(7, "letters")
+ input_sql1 = f'{stb_name}_1,t0=t c0=f,c1="abc aaa" 1626006833639000000'
+ input_sql2 = f'{stb_name}_2,t0=t c0=f,c1=L"abc aaa" 1626006833639000000'
+ input_sql3 = f'{stb_name}_3,t0=t,t1="abc aaa" c0=f 1626006833639000000'
+ input_sql4 = f'{stb_name}_4,t0=t,t1=L"abc aaa" c0=f 1626006833639000000'
for input_sql in [input_sql1, input_sql2, input_sql3, input_sql4]:
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- raise Exception("should not reach here")
- except SchemalessError as err:
- tdSql.checkNotEqual(err.errno, 0)
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
# check accepted binary and nchar symbols
# # * ~!@#$¥%^&*()-+={}|[]、「」:;
for symbol in list('~!@#$¥%^&*()-+={}|[]、「」:;'):
- input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000ns'
- input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000ns'
- self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
+ input_sql1 = f'{stb_name},t0=t c0=f,c1="abc{symbol}aaa" 1626006833639000000'
+ input_sql2 = f'{stb_name},t0=t,t1="abc{symbol}aaa" c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, None)
+ self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, None)
def duplicateIdTagColInsertCheckCase(self):
"""
check duplicate Id Tag Col
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
- self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -761,7 +842,7 @@ class TDTestCase:
input_sql = self.genFullTypeSql()[0]
input_sql_tag = input_sql.replace("t5", "t6")
try:
- self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self._conn.schemaless_insert([input_sql_tag], TDSmlProtocolType.LINE.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -769,7 +850,7 @@ class TDTestCase:
input_sql = self.genFullTypeSql()[0]
input_sql_col = input_sql.replace("c5", "c6")
try:
- self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -777,7 +858,7 @@ class TDTestCase:
input_sql = self.genFullTypeSql()[0]
input_sql_col = input_sql.replace("c5", "C6")
try:
- self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self._conn.schemaless_insert([input_sql_col], TDSmlProtocolType.LINE.value, None)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
@@ -787,20 +868,21 @@ class TDTestCase:
"""
case no id when stb exist
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
self.resCmp(input_sql, stb_name, condition='where tbname like "t_%"')
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
- # TODO cover other case
def duplicateInsertExistCheckCase(self):
"""
check duplicate insert when stb exist
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -810,11 +892,12 @@ class TDTestCase:
"""
check length increase
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
- tb_name = self.getLongName(5, "letters")
- input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name,t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"", c7="\"binaryTagValuebinaryTagValue\"", c8="L\"ncharTagValuencharTagValue\"")
+ tb_name = tdCom.getLongName(5, "letters")
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7="\"binaryTagValuebinaryTagValue\"", t8="L\"ncharTagValuencharTagValue\"", c7="\"binaryTagValuebinaryTagValue\"", c8="L\"ncharTagValuencharTagValue\"")
self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
def tagColAddDupIDCheckCase(self):
@@ -826,28 +909,42 @@ class TDTestCase:
* col is added without value when update==0
* col is added with value when update==1
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
self.createDb("test_update", db_update_tag=db_update_tag)
- input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
+ input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="t", c0="t")
self.resCmp(input_sql, stb_name)
- self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="f", c0="f", ct_add_tag=True)
+ input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t0="t", c0="f", ct_add_tag=True)
if db_update_tag == 1 :
- self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"')
- else:
self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name}"', none_check_tag=True)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 11, "ncharColValue")
+ tdSql.checkData(0, 12, True)
+ tdSql.checkData(0, 22, None)
+ tdSql.checkData(0, 23, None)
+ else:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ tdSql.query(f'select * from {stb_name} where tbname like "{tb_name}"')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 11, None)
+ tdSql.checkData(0, 12, None)
+ tdSql.checkData(0, 22, None)
+ tdSql.checkData(0, 23, None)
+ self.createDb()
def tagColAddCheckCase(self):
"""
check column and tag count add
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
self.resCmp(input_sql, stb_name)
- tb_name_1 = self.getLongName(7, "letters")
+ tb_name_1 = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name_1, t0="f", c0="f", ct_add_tag=True)
self.resCmp(input_sql, stb_name, condition=f'where tbname like "{tb_name_1}"')
res_row_list = self.resHandle(f"select c10,c11,t10,t11 from {tb_name}", True)[0]
@@ -859,7 +956,8 @@ class TDTestCase:
condition: stb not change
insert two table, keep tag unchange, change col
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -881,59 +979,46 @@ class TDTestCase:
"""
every binary and nchar must be length+2
"""
- self.cleanStb()
- stb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
- input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000ns'
+ input_sql = f'{stb_name},id={tb_name},t0=t c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- # * every binary and nchar must be length+2, so here is two tag, max length could not larger than 16384-2*2
- input_sql = f'{stb_name},t0=t,t1="{self.getLongName(16374, "letters")}",t2="{self.getLongName(5, "letters")}" c0=f 1626006833639000000ns'
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
-
- tdSql.query(f"select * from {stb_name}")
- tdSql.checkRows(2)
- input_sql = f'{stb_name},t0=t,t1="{self.getLongName(16374, "letters")}",t2="{self.getLongName(6, "letters")}" c0=f 1626006833639000000ns'
- try:
- self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- raise Exception("should not reach here")
- except SchemalessError:
- pass
- tdSql.query(f"select * from {stb_name}")
- tdSql.checkRows(2)
-
# # * check col,col+ts max in describe ---> 16143
- input_sql = f'{stb_name},t0=t c0=f,c1="{self.getLongName(16374, "letters")}",c2="{self.getLongName(16374, "letters")}",c3="{self.getLongName(16374, "letters")}",c4="{self.getLongName(12, "letters")}" 1626006833639000000ns'
+ input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(12, "letters")}" 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
tdSql.query(f"select * from {stb_name}")
- tdSql.checkRows(3)
- input_sql = f'{stb_name},t0=t c0=f,c1="{self.getLongName(16374, "letters")}",c2="{self.getLongName(16374, "letters")}",c3="{self.getLongName(16374, "letters")}",c4="{self.getLongName(13, "letters")}" 1626006833639000000ns'
+ tdSql.checkRows(2)
+ input_sql = f'{stb_name},t0=t c0=f,c1="{tdCom.getLongName(16374, "letters")}",c2="{tdCom.getLongName(16374, "letters")}",c3="{tdCom.getLongName(16374, "letters")}",c4="{tdCom.getLongName(13, "letters")}" 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
tdSql.query(f"select * from {stb_name}")
- tdSql.checkRows(3)
+ tdSql.checkRows(2)
# * tag nchar max is 16374/4, col+ts nchar max 49151
def tagColNcharMaxLengthCheckCase(self):
"""
check nchar length limit
"""
- self.cleanStb()
- stb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
- input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000ns'
- code = self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ input_sql = f'{stb_name},id={tb_name},t2={tdCom.getLongName(1, "letters")} c0=f 1626006833639000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
# * legal nchar could not be larger than 16374/4
- input_sql = f'{stb_name},t0=t,t1=L"{self.getLongName(4093, "letters")}",t2=L"{self.getLongName(1, "letters")}" c0=f 1626006833639000000ns'
+ input_sql = f'{stb_name},t1={tdCom.getLongName(4093, "letters")},t2={tdCom.getLongName(1, "letters")} c0=f 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
- input_sql = f'{stb_name},t0=t,t1=L"{self.getLongName(4093, "letters")}",t2=L"{self.getLongName(2, "letters")}" c0=f 1626006833639000000ns'
+ input_sql = f'{stb_name},t1={tdCom.getLongName(4093, "letters")},t2={tdCom.getLongName(2, "letters")} c0=f 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
@@ -942,11 +1027,11 @@ class TDTestCase:
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(2)
- input_sql = f'{stb_name},t0=t c0=f,c1=L"{self.getLongName(4093, "letters")}",c2=L"{self.getLongName(4093, "letters")}",c3=L"{self.getLongName(4093, "letters")}",c4=L"{self.getLongName(4, "letters")}" 1626006833639000000ns'
+ input_sql = f'{stb_name},t2={tdCom.getLongName(1, "letters")} c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(4, "letters")}" 1626006833639000000'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
tdSql.query(f"select * from {stb_name}")
tdSql.checkRows(3)
- input_sql = f'{stb_name},t0=t c0=f,c1=L"{self.getLongName(4093, "letters")}",c2=L"{self.getLongName(4093, "letters")}",c3=L"{self.getLongName(4093, "letters")}",c4=L"{self.getLongName(5, "letters")}" 1626006833639000000ns'
+ input_sql = f'{stb_name},t2={tdCom.getLongName(1, "letters")} c0=f,c1=L"{tdCom.getLongName(4093, "letters")}",c2=L"{tdCom.getLongName(4093, "letters")}",c3=L"{tdCom.getLongName(4093, "letters")}",c4=L"{tdCom.getLongName(5, "letters")}" 1626006833639000000'
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
@@ -959,48 +1044,162 @@ class TDTestCase:
"""
test batch insert
"""
- self.cleanStb()
- stb_name = self.getLongName(8, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
- lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
- "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000ns",
- f"{stb_name},t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532ns",
- "stf567890,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns",
- "st123456,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000ns",
- f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532ns",
- f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532ns",
- "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000ns",
- "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000ns"
+ lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
+ "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64 1626006833640000000",
+ f"{stb_name},t2=5f64,t3=L\"ste\" c1=true,c2=4i64,c3=\"iam\" 1626056811823316532",
+ "stf567890,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000",
+ "st123456,t1=4i64,t2=5f64,t3=\"t4\" c1=3i64,c3=L\"passitagain\",c2=true,c4=5f64 1626006833642000000",
+ f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false 1626056811843316532",
+ f"{stb_name},t2=5f64,t3=L\"ste2\" c3=\"iamszhou\",c4=false,c5=32i8,c6=64i16,c7=32i32,c8=88.88f32 1626056812843316532",
+ "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin\",c2=true,c4=5f64,c5=5f64,c6=7u64 1626006933640000000",
+ "st123456,t1=4i64,t3=\"t4\",t2=5f64,t4=5f64 c1=3i64,c3=L\"passitagin_stf\",c2=false,c5=5f64,c6=7u64 1626006933641000000"
]
self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ tdSql.query('show stables')
+ tdSql.checkRows(3)
+ tdSql.query('show tables')
+ tdSql.checkRows(6)
+ tdSql.query('select * from st123456')
+ tdSql.checkRows(5)
def multiInsertCheckCase(self, count):
- """
- test multi insert
- """
- self.cleanStb()
- sql_list = []
- stb_name = self.getLongName(8, "letters")
- tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
- for i in range(count):
- input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
- sql_list.append(input_sql)
- self._conn.schemaless_insert(sql_list, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ """
+ test multi insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ sql_list = []
+ stb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
+ for i in range(count):
+ input_sql = self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True)[0]
+ sql_list.append(input_sql)
+ self._conn.schemaless_insert(sql_list, TDSmlProtocolType.LINE.value, None)
+ tdSql.query('show tables')
+ tdSql.checkRows(count)
def batchErrorInsertCheckCase(self):
"""
test batch error insert
"""
- self.cleanStb()
- stb_name = self.getLongName(8, "letters")
- lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000ns",
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i 64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"]
try:
- self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self._conn.schemaless_insert(lines, TDSmlProtocolType.LINE.value, None)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def multiColsInsertCheckCase(self):
+ """
+ test multi cols insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def multiTagsInsertCheckCase(self):
+ """
+ test multi tags insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(t_multi_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def blankColInsertCheckCase(self):
+ """
+ test blank col insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
raise Exception("should not reach here")
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
+ def blankTagInsertCheckCase(self):
+ """
+ test blank tag insert
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
+ try:
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ raise Exception("should not reach here")
+ except SchemalessError as err:
+ tdSql.checkNotEqual(err.errno, 0)
+
+ def chineseCheckCase(self):
+ """
+ check nchar ---> chinese
+ """
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
+ self.resCmp(input_sql, stb_name)
+
+ def spellCheckCase(self):
+ stb_name = tdCom.getLongName(8, "letters")
+ tdCom.cleanTb()
+ input_sql_list = [f'{stb_name}_1,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_2,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_3,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_4,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_5,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_6,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_7,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_8,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_9,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_10,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789F64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(',')[0]
+ self.resCmp(input_sql, stb_name)
+
+ def defaultTypeCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ stb_name = tdCom.getLongName(8, "letters")
+ input_sql_list = [f'{stb_name}_1,t0=127,t1=32767I16,t2=2147483647I32,t3=9223372036854775807,t4=11.12345027923584F32,t5=22.123456789F64 c0=127,c1=32767I16,c2=2147483647I32,c3=9223372036854775807,c4=11.12345027923584F32,c5=22.123456789F64 1626006833639000000',
+ f'{stb_name}_2,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=22.123456789 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=22.123456789 1626006833639000000',
+ f'{stb_name}_3,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=10e5F32 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=10e5F64 1626006833639000000',
+ f'{stb_name}_4,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=10.0e5f64 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=10.0e5f32 1626006833639000000',
+ f'{stb_name}_5,t0=127I8,t1=32767I16,t2=2147483647I32,t3=9223372036854775807I64,t4=11.12345027923584F32,t5=-10.0e5 c0=127I8,c1=32767I16,c2=2147483647I32,c3=9223372036854775807I64,c4=11.12345027923584F32,c5=-10.0e5 1626006833639000000']
+ for input_sql in input_sql_list:
+ stb_name = input_sql.split(",")[0]
+ self.resCmp(input_sql, stb_name)
+
+ def tbnameTagsColsNameCheckCase(self):
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ input_sql = 'rFa$sta,id=rFas$ta_1,Tt!0=true,tT@1=127i8,t#2=32767i16,\"t$3\"=2147483647i32,t%4=9223372036854775807i64,t^5=11.12345f32,t&6=22.123456789f64,t*7=\"ddzhiksj\",t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\" C)0=True,c{1=127i8,c[2=32767i16,c;3=2147483647i32,c:4=9223372036854775807i64,c<5=11.12345f32,c>6=22.123456789f64,c?7=\"bnhwlgvj\",c.8=L\"ncharTagValue\",c!@#$%^&*()_+[];:<>?,=7u64 1626006933640000000'
+ self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ query_sql = 'select * from `rfa$sta`'
+ query_res = tdSql.query(query_sql, True)
+ tdSql.checkEqual(query_res, [(datetime.datetime(2021, 7, 11, 20, 35, 33, 640000), True, 127, 32767, 2147483647, 9223372036854775807, 11.12345027923584, 22.123456789, 'bnhwlgvj', 'ncharTagValue', 7, 'true', '127i8', '32767i16', '2147483647i32', '9223372036854775807i64', '11.12345f32', '22.123456789f64', '"ddzhiksj"', 'L"ncharTagValue"')])
+ col_tag_res = tdSql.getColNameList(query_sql)
+ tdSql.checkEqual(col_tag_res, ['_ts', 'c)0', 'c{1', 'c[2', 'c;3', 'c:4', 'c<5', 'c>6', 'c?7', 'c.8', 'c!@#$%^&*()_+[];:<>?,', 'tt!0', 'tt@1', 't#2', '"t$3"', 't%4', 't^5', 't&6', 't*7', 't!@#$%^&*()_+[];:<>?,9'])
+ tdSql.execute('drop table `rfa$sta`')
+
def genSqlList(self, count=5, stb_name="", tb_name=""):
"""
stb --> supertable
@@ -1027,19 +1226,19 @@ class TDTestCase:
s_stb_d_tb_d_ts_a_col_m_tag_list = list()
s_stb_d_tb_d_ts_a_tag_m_col_list = list()
for i in range(count):
- d_stb_d_tb_list.append(self.genFullTypeSql(t0="f", c0="f"))
- s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"'))
- s_stb_s_tb_a_col_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', ct_add_tag=True))
- s_stb_s_tb_m_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', ct_min_tag=True))
- s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True))
- s_stb_d_tb_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True, ct_am_tag=True))
- s_stb_d_tb_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True, ct_ma_tag=True))
- s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', ts=0))
- s_stb_s_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', ts=0, ct_am_tag=True))
- s_stb_s_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', ts=0, ct_ma_tag=True))
- s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0))
- s_stb_d_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_am_tag=True))
- s_stb_d_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{self.getLongName(8, "letters")}"', c7=f'"{self.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_ma_tag=True))
+ d_stb_d_tb_list.append(self.genFullTypeSql(c0="t"))
+ s_stb_s_tb_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"'))
+ s_stb_s_tb_a_col_a_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_add_tag=True))
+ s_stb_s_tb_m_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ct_min_tag=True))
+ s_stb_d_tb_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True))
+ s_stb_d_tb_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_am_tag=True))
+ s_stb_d_tb_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ct_ma_tag=True))
+ s_stb_s_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0))
+ s_stb_s_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_am_tag=True))
+ s_stb_s_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, tb_name=tb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', ts=0, ct_ma_tag=True))
+ s_stb_d_tb_d_ts_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0))
+ s_stb_d_tb_d_ts_a_col_m_tag_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_am_tag=True))
+ s_stb_d_tb_d_ts_a_tag_m_col_list.append(self.genFullTypeSql(stb_name=stb_name, t7=f'"{tdCom.getLongName(8, "letters")}"', c7=f'"{tdCom.getLongName(8, "letters")}"', id_noexist_tag=True, ts=0, ct_ma_tag=True))
return d_stb_d_tb_list, s_stb_s_tb_list, s_stb_s_tb_a_col_a_tag_list, s_stb_s_tb_m_col_m_tag_list, \
s_stb_d_tb_list, s_stb_d_tb_a_col_m_tag_list, s_stb_d_tb_a_tag_m_col_list, s_stb_s_tb_d_ts_list, \
@@ -1050,7 +1249,7 @@ class TDTestCase:
def genMultiThreadSeq(self, sql_list):
tlist = list()
for insert_sql in sql_list:
- t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value))
+ t = threading.Thread(target=self._conn.schemaless_insert,args=([insert_sql[0]], TDSmlProtocolType.LINE.value, None))
tlist.append(t)
return tlist
@@ -1064,7 +1263,8 @@ class TDTestCase:
"""
thread input different stb
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql = self.genSqlList()[0]
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;")
@@ -1074,8 +1274,9 @@ class TDTestCase:
"""
thread input same stb tb, different data, result keep first data
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
s_stb_s_tb_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[1]
@@ -1091,8 +1292,9 @@ class TDTestCase:
"""
thread input same stb tb, different data, add columes and tags, result keep first data
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
s_stb_s_tb_a_col_a_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[2]
@@ -1108,8 +1310,9 @@ class TDTestCase:
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
s_stb_s_tb_m_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[3]
@@ -1125,7 +1328,8 @@ class TDTestCase:
"""
thread input same stb, different tb, different data
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1137,15 +1341,16 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add col, mul tag
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
# s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5]
- s_stb_d_tb_a_col_m_tag_list = [(f'{stb_name},t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="ngxgzdzs",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=F 1626006833639000000ns', 'hpxbys'), \
- (f'{stb_name},t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="vvfrdtty",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=True 1626006833639000000ns', 'hpxbys'), \
- (f'{stb_name},t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="kzscucnt",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=f 1626006833639000000ns', 'hpxbys'), \
- (f'{stb_name},t0=false,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="asegdbqk",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=false 1626006833639000000ns', 'hpxbys'), \
- (f'{stb_name},t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="yvqnhgmn",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=T 1626006833639000000ns', 'hpxbys')]
+ s_stb_d_tb_a_col_m_tag_list = [(f'{stb_name},t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="ngxgzdzs",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=F 1626006833639000000', 'hpxbys'), \
+ (f'{stb_name},t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="vvfrdtty",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=True 1626006833639000000', 'hpxbys'), \
+ (f'{stb_name},t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="kzscucnt",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=f 1626006833639000000', 'hpxbys'), \
+ (f'{stb_name},t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="asegdbqk",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=false 1626006833639000000', 'hpxbys'), \
+ (f'{stb_name},t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=true,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="yvqnhgmn",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=T 1626006833639000000', 'hpxbys')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_a_col_m_tag_list))
tdSql.query(f"show tables;")
tdSql.checkRows(3)
@@ -1154,7 +1359,8 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add tag, mul col
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1166,16 +1372,17 @@ class TDTestCase:
"""
thread input same stb tb, different ts
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
# s_stb_s_tb_d_ts_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[7]
- s_stb_s_tb_d_ts_list =[(f'{stb_name},id="{tb_name}",t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="tgqkvsws",t8=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="htvnnldm",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
- (f'{stb_name},id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="fvrhhqiy",t8=L"ncharTagValue" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="gybqvhos",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
- (f'{stb_name},id="{tb_name}",t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vifkabhu",t8=L"ncharTagValue" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="zlvxgquy",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
- (f'{stb_name},id="{tb_name}",t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="lsyotcrn",t8=L"ncharTagValue" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="oaupfgtz",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
- (f'{stb_name},id="{tb_name}",t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="jrwamcgy",t8=L"ncharTagValue" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="vgzadjsh",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz')]
+ s_stb_s_tb_d_ts_list =[(f'{stb_name},id={tb_name},t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="tgqkvsws",t8=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="htvnnldm",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
+ (f'{stb_name},id={tb_name},t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="fvrhhqiy",t8=L"ncharTagValue" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="gybqvhos",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
+ (f'{stb_name},id={tb_name},t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vifkabhu",t8=L"ncharTagValue" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="zlvxgquy",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
+ (f'{stb_name},id={tb_name},t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="lsyotcrn",t8=L"ncharTagValue" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="oaupfgtz",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz'), \
+ (f'{stb_name},id={tb_name},t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="jrwamcgy",t8=L"ncharTagValue" c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="vgzadjsh",c8=L"ncharColValue",c9=7u64 0', 'sfzqdz')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_list))
tdSql.query(f"show tables;")
tdSql.checkRows(1)
@@ -1187,8 +1394,9 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add col, mul tag
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
s_stb_s_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[8]
@@ -1206,16 +1414,17 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add tag, mul col
"""
- self.cleanStb()
- tb_name = self.getLongName(7, "letters")
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
# s_stb_s_tb_d_ts_a_tag_m_col_list = self.genSqlList(stb_name=stb_name, tb_name=tb_name)[9]
- s_stb_s_tb_d_ts_a_tag_m_col_list = [(f'{stb_name},id="{tb_name}",t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="xsajdfjc",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
- (f'{stb_name},id="{tb_name}",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="qzeyolgt",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
- (f'{stb_name},id="{tb_name}",t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="suxqziwh",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
- (f'{stb_name},id="{tb_name}",t0=false,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vapolpgr",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
- (f'{stb_name},id="{tb_name}",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="eustwpfl",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb')]
+ s_stb_s_tb_d_ts_a_tag_m_col_list = [(f'{stb_name},id={tb_name},t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="xsajdfjc",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
+ (f'{stb_name},id={tb_name},t0=T,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="qzeyolgt",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
+ (f'{stb_name},id={tb_name},t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="suxqziwh",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
+ (f'{stb_name},id={tb_name},t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="vapolpgr",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb'), \
+ (f'{stb_name},id={tb_name},t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="eustwpfl",t8=L"ncharTagValue",t11=127i8,t10=L"ncharTagValue" c0=t,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 0', 'rgqcfb')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_s_tb_d_ts_a_tag_m_col_list))
tdSql.query(f"show tables;")
tdSql.checkRows(1)
@@ -1232,7 +1441,8 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1244,49 +1454,45 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts, add col, mul tag
"""
- self.cleanStb()
+ tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
+ tdCom.cleanTb()
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
# s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11]
s_stb_d_tb_d_ts_a_col_m_tag_list = [(f'{stb_name},t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="eltflgpz",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=True 0', 'ynnlov'), \
- (f'{stb_name},t0=t,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="ysznggwl",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=t 0', 'ynnlov'), \
+ (f'{stb_name},t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=False,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="ysznggwl",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=t 0', 'ynnlov'), \
(f'{stb_name},t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="nxwjucch",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=f 0', 'ynnlov'), \
- (f'{stb_name},t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="fzseicnt",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=F 0', 'ynnlov'), \
- (f'{stb_name},t0=False,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="zwgurhdp",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=False 0', 'ynnlov')]
+ (f'{stb_name},t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=T,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="fzseicnt",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=F 0', 'ynnlov'), \
+ (f'{stb_name},t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=F,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="zwgurhdp",c8=L"ncharColValue",c9=7u64,c11=L"ncharColValue",c10=False 0', 'ynnlov')]
self.multiThreadRun(self.genMultiThreadSeq(s_stb_d_tb_d_ts_a_col_m_tag_list))
tdSql.query(f"show tables;")
tdSql.checkRows(3)
def test(self):
- input_sql1 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"ddzhiksj\",t8=L\"ncharTagValue\" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"bnhwlgvj\",c8=L\"ncharTagValue\",c9=7u64 1626006933640000000ns"
- input_sql2 = "rfasta,id=\"rfasta_1\",t0=true,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64 c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64 1626006933640000000ns"
+ input_sql1 = "rfa$sta,id=rfas$ta_1,T!0=true,t@1=127i8,t#2=32767i16,t$3=2147483647i32,t%4=9223372036854775807i64,t^5=11.12345f32,t&6=22.123456789f64,t*7=\"ddzhiksj\",t(8=L\"ncharTagValue\" C)0=True,c{1=127i8,c[2=32767i16,c;3=2147483647i32,c:4=9223372036854775807i64,c<5=11.12345f32,c>6=22.123456789f64,c?7=\"bnhwlgvj\",c.8=L\"ncharTagValue\",c,9=7u64 1626006933640000000ns"
try:
- self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
- self._conn.schemaless_insert([input_sql2], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
+ self._conn.schemaless_insert([input_sql1], TDSmlProtocolType.LINE.value, None)
+ # self._conn.schemaless_insert([input_sql2])
except SchemalessError as err:
print(err.errno)
- # self._conn.schemaless_insert([input_sql2], 0)
- # input_sql3 = f'abcd,id="cc¥Ec",t0=True,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="ndsfdrum",t8=L"ncharTagValue" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="igwoehkm",c8=L"ncharColValue",c9=7u64 0'
- # print(input_sql3)
- # input_sql4 = 'hmemeb,id="kilrcrldgf",t0=F,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7="fysodjql",t8=L"ncharTagValue" c0=True,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7="waszbfvc",c8=L"ncharColValue",c9=7u64 0'
- # code = self._conn.schemaless_insert([input_sql3], 0)
- # print(code)
- # self._conn.schemaless_insert([input_sql4], 0)
def runAll(self):
self.initCheckCase()
self.boolTypeCheckCase()
self.symbolsCheckCase()
self.tsCheckCase()
+ self.zeroTsCheckCase()
+ self.iuCheckCase()
self.idSeqCheckCase()
- self.idUpperCheckCase()
+ self.idLetterCheckCase()
self.noIdCheckCase()
self.maxColTagCheckCase()
- self.idIllegalNameCheckCase()
+ self.stbTbNameCheckCase()
self.idStartWithNumCheckCase()
self.nowTsCheckCase()
self.dateFormatTsCheckCase()
self.illegalTsCheckCase()
+ self.tbnameCheckCase()
self.tagValueLengthCheckCase()
self.colValueLengthCheckCase()
self.tagColIllegalValueCheckCase()
@@ -1300,32 +1506,31 @@ class TDTestCase:
self.tagColBinaryMaxLengthCheckCase()
self.tagColNcharMaxLengthCheckCase()
self.batchInsertCheckCase()
- self.multiInsertCheckCase(1000)
+ self.multiInsertCheckCase(100)
self.batchErrorInsertCheckCase()
+ self.multiColsInsertCheckCase()
+ self.multiTagsInsertCheckCase()
+ self.blankColInsertCheckCase()
+ self.blankTagInsertCheckCase()
+ self.chineseCheckCase()
+ self.spellCheckCase()
+ self.defaultTypeCheckCase()
+ self.tbnameTagsColsNameCheckCase()
+
# MultiThreads
self.stbInsertMultiThreadCheckCase()
self.sStbStbDdataInsertMultiThreadCheckCase()
self.sStbStbDdataAtcInsertMultiThreadCheckCase()
self.sStbStbDdataMtcInsertMultiThreadCheckCase()
self.sStbDtbDdataInsertMultiThreadCheckCase()
-
- # # ! concurrency conflict
self.sStbDtbDdataAcMtInsertMultiThreadCheckCase()
self.sStbDtbDdataAtMcInsertMultiThreadCheckCase()
-
self.sStbStbDdataDtsInsertMultiThreadCheckCase()
-
- # # ! concurrency conflict
self.sStbStbDdataDtsAcMtInsertMultiThreadCheckCase()
self.sStbStbDdataDtsAtMcInsertMultiThreadCheckCase()
-
self.sStbDtbDdataDtsInsertMultiThreadCheckCase()
-
- # ! concurrency conflict
self.sStbDtbDdataDtsAcMtInsertMultiThreadCheckCase()
-
-
def run(self):
print("running {}".format(__file__))
self.createDb()
@@ -1334,12 +1539,10 @@ class TDTestCase:
except Exception as err:
print(''.join(traceback.format_exception(None, err, err.__traceback__)))
raise err
- # self.tagColIllegalValueCheckCase()
- # self.test()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/insert/special_character_show.py b/tests/pytest/insert/special_character_show.py
index 3b2df5c87380c22fb18cbee06c866249b4365a70..ce9f1de76aa5896beb3aa78dce8a3a65a81a973c 100644
--- a/tests/pytest/insert/special_character_show.py
+++ b/tests/pytest/insert/special_character_show.py
@@ -31,9 +31,8 @@ class TDTestCase:
tdLog.info('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
tdSql.execute('create table stb1 (ts timestamp, value double) tags (bin binary(128))')
- tdLog.info('=============== step2,create table增加了转义字符')
+ tdLog.info('=============== step2,create table with escape character')
tdLog.info('create table tb1 using stb1 tags("abc\\"def")')
- #增加了转义字符\
tdSql.execute('create table tb1 using stb1 tags("abc\\"def")')
tdLog.info('=============== step3,insert data')
diff --git a/tests/pytest/query/nestedQuery/nestedQuery.py b/tests/pytest/query/nestedQuery/nestedQuery.py
index 453ee8f53975509c318486242c634d3b60de4992..545f6429e825c468bdb07524329d6ea49944e379 100755
--- a/tests/pytest/query/nestedQuery/nestedQuery.py
+++ b/tests/pytest/query/nestedQuery/nestedQuery.py
@@ -1714,7 +1714,6 @@ class TDTestCase:
sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_where)
sql += "%s " % random.choice(session_u_where)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -1731,7 +1730,6 @@ class TDTestCase:
sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_or_where)
sql += "%s " % random.choice(session_u_where)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -1767,7 +1765,6 @@ class TDTestCase:
sql += " from table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_where)
sql += "%s " % random.choice(session_u_where)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -1784,7 +1781,6 @@ class TDTestCase:
sql += " from table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_or_where)
sql += "%s " % random.choice(session_u_where)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -1818,7 +1814,6 @@ class TDTestCase:
sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(t_join_where)
sql += "%s " % random.choice(session_u_where)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -1835,7 +1830,6 @@ class TDTestCase:
sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(qt_u_or_where)
sql += "%s " % random.choice(session_u_where)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -2015,7 +2009,6 @@ class TDTestCase:
sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
sql += "%s and " % random.choice(t_join_where)
sql += "%s " % random.choice(interp_where_j)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -2032,7 +2025,6 @@ class TDTestCase:
sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
sql += "%s and " % random.choice(qt_u_or_where)
sql += "%s " % random.choice(interp_where_j)
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -2065,7 +2057,6 @@ class TDTestCase:
sql += " from table_0 t1, table_1 t2 where t1.ts = t2.ts and "
#sql += "%s and " % random.choice(t_join_where)
sql += "%s " % interp_where_j[random.randint(0,5)]
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
@@ -2116,7 +2107,6 @@ class TDTestCase:
sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
#sql += "%s " % random.choice(interp_where_j)
sql += "%s " % interp_where_j[random.randint(0,5)]
- sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
diff --git a/tests/pytest/query/nestedQuery/nestedQuery_datacheck.py b/tests/pytest/query/nestedQuery/nestedQuery_datacheck.py
new file mode 100755
index 0000000000000000000000000000000000000000..308bf4f9e69828bf80728e320247a03303c7121e
--- /dev/null
+++ b/tests/pytest/query/nestedQuery/nestedQuery_datacheck.py
@@ -0,0 +1,637 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+import os
+import sys
+import time
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.dnodes import tdDnodes
+from util.dnodes import *
+
+class TDTestCase:
+ updatecfgDict={'maxSQLLength':1048576}
+
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ os.system("rm -rf query/nestedQuery/nestedQuery_datacheck.py.sql")
+ now = time.time()
+ self.ts = 1630000000000
+ self.num = 100
+ self.fornum = 3
+
+ def get_random_string(self, length):
+ letters = string.ascii_lowercase
+ result_str = ''.join(random.choice(letters) for i in range(length))
+ return result_str
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root)-len("/build/bin")]
+ break
+ return buildPath
+
+ def restartDnodes(self):
+ tdDnodes.stop(1)
+ tdDnodes.start(1)
+
+ def dropandcreateDB(self,n):
+ for i in range(n):
+ tdSql.execute('''drop database if exists db ;''')
+ tdSql.execute('''create database db keep 36500;''')
+ tdSql.execute('''use db;''')
+
+ tdSql.execute('''create stable stable_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,t_float float , t_double double , t_ts timestamp);''')
+ tdSql.execute('''create stable stable_2
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp)
+ tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
+ t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,t_float float , t_double double , t_ts timestamp);''')
+
+
+ tdSql.execute('''create table table_0 using stable_1
+ tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
+ tdSql.execute('''create table table_1 using stable_1
+ tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
+ 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_2 using stable_1
+ tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
+ 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
+ tdSql.execute('''create table table_21 using stable_2
+ tags('table_21' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
+
+ #regular table
+ tdSql.execute('''create table regular_table_1
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp) ;''')
+ tdSql.execute('''create table regular_table_2
+ (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
+ q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp) ;''')
+
+
+ for i in range(self.num):
+ tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000, i, i, i, i, i, i, i, i, self.ts + i))
+ tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000+1, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, i, i, 1262304000001 + i))
+ tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000+2, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, -i, -i, 1577836800001 + i))
+
+ tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000, i, i, i, i, i, i, i, i, self.ts + i))
+
+ tdSql.execute('''insert into regular_table_2 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000 , 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
+ i, i, i, i, 1262304000001 + i))
+ tdSql.execute('''insert into regular_table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
+ % (self.ts + i*10000000 +2, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
+ i, i, -i, -i, 1577836800001 + i))
+
+ def regular1_checkall_0(self,sql):
+ tdLog.info(sql)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'2021-08-27 01:46:40.000')
+ tdSql.checkData(0,1,0)
+ tdSql.checkData(0,2,0)
+ tdSql.checkData(0,3,0)
+ tdSql.checkData(0,4,0)
+ tdSql.checkData(0,5,'False')
+ tdSql.checkData(0,6,'binary.0')
+ tdSql.checkData(0,7,'nchar.0')
+ tdSql.checkData(0,8,0)
+ tdSql.checkData(0,9,0)
+ tdSql.checkData(0,10,'2021-08-27 01:46:40.000')
+
+ def regular1_checkall_100(self,sql):
+ tdLog.info(sql)
+ tdSql.query(sql)
+ tdSql.checkData(99,0,'2021-09-07 12:46:40.000')
+ tdSql.checkData(99,1,99)
+ tdSql.checkData(99,2,99)
+ tdSql.checkData(99,3,99)
+ tdSql.checkData(99,4,99)
+ tdSql.checkData(99,5,'False')
+ tdSql.checkData(99,6,'binary.99')
+ tdSql.checkData(99,7,'nchar.99')
+ tdSql.checkData(99,8,99)
+ tdSql.checkData(99,9,99)
+ tdSql.checkData(99,10,'2021-08-27 01:46:40.099')
+
+ def regular_join_checkall_0(self,sql):
+ self.regular1_checkall_0(sql)
+ tdSql.checkData(0,11,'2021-08-27 01:46:40.000')
+ tdSql.checkData(0,12,2147483647)
+ tdSql.checkData(0,13,9223372036854775807)
+ tdSql.checkData(0,14,32767)
+ tdSql.checkData(0,15,127)
+ tdSql.checkData(0,16,'True')
+ tdSql.checkData(0,17,'binary1.0')
+ tdSql.checkData(0,18,'nchar1.0')
+ tdSql.checkData(0,19,0)
+ tdSql.checkData(0,20,0)
+ tdSql.checkData(0,21,'2010-01-01 08:00:00.001')
+
+ def regular_join_checkall_100(self,sql):
+ self.regular1_checkall_100(sql)
+ tdSql.checkData(99,11,'2021-09-07 12:46:40.000')
+ tdSql.checkData(99,12,2147483548)
+ tdSql.checkData(99,13,9223372036854775708)
+ tdSql.checkData(99,14,32668)
+ tdSql.checkData(99,15,28)
+ tdSql.checkData(99,16,'True')
+ tdSql.checkData(99,17,'binary1.99')
+ tdSql.checkData(99,18,'nchar1.99')
+ tdSql.checkData(99,19,99)
+ tdSql.checkData(99,20,99)
+ tdSql.checkData(99,21,'2010-01-01 08:00:00.100')
+
+ def stable1_checkall_0(self,sql):
+ self.regular1_checkall_0(sql)
+
+ def stable1_checkall_300(self,sql):
+ tdLog.info(sql)
+ tdSql.query(sql)
+ tdSql.checkData(299,0,'2021-09-07 12:46:40.002')
+ tdSql.checkData(299,1,-2147483548)
+ tdSql.checkData(299,2,-9223372036854775708)
+ tdSql.checkData(299,3,-32668)
+ tdSql.checkData(299,4,-28)
+ tdSql.checkData(299,5,'True')
+ tdSql.checkData(299,6,'binary2.99')
+ tdSql.checkData(299,7,'nchar2nchar2.99')
+ tdSql.checkData(299,8,-99)
+ tdSql.checkData(299,9,-99)
+ tdSql.checkData(299,10,'2010-01-01 08:00:00.100')
+
+ def stable_join_checkall_0(self,sql):
+ self.regular1_checkall_0(sql)
+ tdSql.checkData(0,22,'2021-08-27 01:46:40.000')
+ tdSql.checkData(0,23,0)
+ tdSql.checkData(0,24,0)
+ tdSql.checkData(0,25,0)
+ tdSql.checkData(0,26,0)
+ tdSql.checkData(0,27,'False')
+ tdSql.checkData(0,28,'binary.0')
+ tdSql.checkData(0,29,'nchar.0')
+ tdSql.checkData(0,30,0)
+ tdSql.checkData(0,31,0)
+ tdSql.checkData(0,32,'2021-08-27 01:46:40.000')
+
+ def stable_join_checkall_100(self,sql):
+ tdSql.checkData(99,0,'2021-09-07 12:46:40.000')
+ tdSql.checkData(99,1,99)
+ tdSql.checkData(99,2,99)
+ tdSql.checkData(99,3,99)
+ tdSql.checkData(99,4,99)
+ tdSql.checkData(99,5,'False')
+ tdSql.checkData(99,6,'binary.99')
+ tdSql.checkData(99,7,'nchar.99')
+ tdSql.checkData(99,8,99)
+ tdSql.checkData(99,9,99)
+ tdSql.checkData(99,10,'2021-08-27 01:46:40.099')
+ tdSql.checkData(99,22,'2021-09-07 12:46:40.000')
+ tdSql.checkData(99,23,99)
+ tdSql.checkData(99,24,99)
+ tdSql.checkData(99,25,99)
+ tdSql.checkData(99,26,99)
+ tdSql.checkData(99,27,'False')
+ tdSql.checkData(99,28,'binary.99')
+ tdSql.checkData(99,29,'nchar.99')
+ tdSql.checkData(99,30,99)
+ tdSql.checkData(99,31,99)
+ tdSql.checkData(99,32,'2021-08-27 01:46:40.099')
+
+
+ def run(self):
+ tdSql.prepare()
+ # test case for https://jira.taosdata.com:18080/browse/TD-5665
+ os.system("rm -rf nestedQuery.py.sql")
+ startTime = time.time()
+
+ dcDB = self.dropandcreateDB(1)
+
+ # regular column select
+ q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ']
+
+ # tag column select
+ t_select= ['*' , 'loc' ,'t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts ']
+
+ # regular and tag column select
+ qt_select= q_select + t_select
+
+ # distinct regular column select
+ dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' ,
+ 'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts ']
+
+ # distinct tag column select
+ dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' ,
+ 'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts ']
+
+ # distinct regular and tag column select
+ dqt_select= dq_select + dt_select
+
+ # special column select
+ s_r_select= ['_c0', '_C0' ]
+ s_s_select= ['tbname' , '_c0', '_C0' ]
+
+ # regular column where
+ q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647',
+ 'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -100000 and q_float <= 100000',
+ 'q_double >= -1000000000 and q_double <= 1000000000', 'q_binary like \'binary%\' or q_binary = \'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' ,
+ 'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1',
+ 'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767',
+ 'q_tinyint between -127 and 127 ','q_float between -100000 and 100000','q_double between -1000000000 and 1000000000']
+ #TD-6201 ,'q_bool between 0 and 1'
+
+ # regular column where for test union,join
+ q_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.q_bigint >= -9223372036854775807 and t1.q_bigint <= 9223372036854775807 and t2.q_bigint >= -9223372036854775807 and t2.q_bigint <= 9223372036854775807',
+ 't1.q_int <= 2147483647 and t1.q_int >= -2147483647 and t2.q_int <= 2147483647 and t2.q_int >= -2147483647',
+ 't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767',
+ 't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127',
+ 't1.q_float >= -100000 and t1.q_float <= 100000 and t2.q_float >= -100000 and t2.q_float <= 100000',
+ 't1.q_double >= -1000000000 and t1.q_double <= 1000000000 and t2.q_double >= -1000000000 and t2.q_double <= 1000000000',
+ 't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' ,
+ 't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' ,
+ 't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' ,
+ 't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807',
+ 't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647',
+ 't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767',
+ 't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -100000 and 100000 and t2.q_float between -100000 and 100000',
+ 't1.q_double between -1000000000 and 1000000000 and t2.q_double between -1000000000 and 1000000000']
+ #TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1']
+ #'t1.q_bool = true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' ,
+
+ q_u_or_where = ['t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' ' ,
+ 't1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' ' , 't1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false' ,
+ 't1.q_bool in (0 , 1) or t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) or t2.q_bool in ( true , false)' , 't1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1' ,
+ 't1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807',
+ 't1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647',
+ 't1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767',
+ 't1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 ','t1.q_float between -100000 and 100000 or t2.q_float between -100000 and 100000',
+ 't1.q_double between -1000000000 and 1000000000 or t2.q_double between -1000000000 and 1000000000']
+
+ # tag column where
+ t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647',
+ 't_smallint >= -32767 and t_smallint <= 32767','q_tinyint >= -127 and t_tinyint <= 127','t_float >= -100000 and t_float <= 100000',
+ 't_double >= -1000000000 and t_double <= 1000000000', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' ,
+ 't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1',
+ 't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767',
+ 't_tinyint between -127 and 127 ','t_float between -100000 and 100000','t_double between -1000000000 and 1000000000']
+ #TD-6201,'t_bool between 0 and 1'
+
+ # tag column where for test union,join | this is not supported
+ t_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.t_bigint >= -9223372036854775807 and t1.t_bigint <= 9223372036854775807 and t2.t_bigint >= -9223372036854775807 and t2.t_bigint <= 9223372036854775807',
+ 't1.t_int <= 2147483647 and t1.t_int >= -2147483647 and t2.t_int <= 2147483647 and t2.t_int >= -2147483647',
+ 't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767',
+ 't1.t_tinyint >= -127 and t1.t_tinyint <= 127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127',
+ 't1.t_float >= -100000 and t1.t_float <= 100000 and t2.t_float >= -100000 and t2.t_float <= 100000',
+ 't1.t_double >= -1000000000 and t1.t_double <= 1000000000 and t2.t_double >= -1000000000 and t2.t_double <= 1000000000',
+ 't1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' ' ,
+ 't1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' ' , 't1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false' ,
+ 't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , 't1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1',
+ 't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807',
+ 't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647',
+ 't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767',
+ 't1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127 ','t1.t_float between -100000 and 100000 and t2.t_float between -100000 and 100000',
+ 't1.t_double between -1000000000 and 1000000000 and t2.t_double between -1000000000 and 1000000000']
+ #TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1']
+
+ t_u_or_where = ['t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' ' ,
+ 't1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' ' , 't1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false' ,
+ 't1.t_bool in (0 , 1) or t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) or t2.t_bool in ( true , false)' , 't1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1',
+ 't1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807',
+ 't1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647',
+ 't1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767',
+ 't1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 ','t1.t_float between -100000 and 100000 or t2.t_float between -100000 and 100000',
+ 't1.t_double between -1000000000 and 1000000000 or t2.t_double between -1000000000 and 1000000000']
+
+ # regular and tag column where
+ qt_where = q_where + t_where
+ qt_u_where = q_u_where + t_u_where
+ # currently, qt_u_or_where is not supported
+ qt_u_or_where = q_u_or_where + t_u_or_where
+
+ # tag column where for test super join | this is supported ('t1.t_bool = t2.t_bool ' is questionable)
+ t_join_where = ['t1.t_bigint = t2.t_bigint ', 't1.t_int = t2.t_int ', 't1.t_smallint = t2.t_smallint ', 't1.t_tinyint = t2.t_tinyint ',
+ 't1.t_float = t2.t_float ', 't1.t_double = t2.t_double ', 't1.t_binary = t2.t_binary ' , 't1.t_nchar = t2.t_nchar ' ]
+
+ # session && fill
+ session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)']
+ session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)',
+ 'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)']
+
+ fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)']
+
+ state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)']
+ state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)',
+ 'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)']
+
+ # order by where
+ order_where = ['order by ts' , 'order by ts asc']
+ order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc']
+ order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ]
+ orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc']
+
+ # group by where, not including null tags
+ group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint',
+ 'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint',
+ 'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' ]
+ having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0',
+ 'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0',
+ 'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0',
+ 'having STDDEV(q_int) > 0','having STDDEV(q_bigint) > 0','having STDDEV(q_smallint) > 0','having STDDEV(q_tinyint) > 0','having STDDEV(q_float) > 0','having STDDEV(q_double) > 0',
+ 'having TWA(q_int) > 0','having TWA(q_bigint) > 0','having TWA(q_smallint) > 0','having TWA(q_tinyint) > 0','having TWA(q_float) > 0','having TWA(q_double) > 0',
+ 'having IRATE(q_int) > 0','having IRATE(q_bigint) > 0','having IRATE(q_smallint) > 0','having IRATE(q_tinyint) > 0','having IRATE(q_float) > 0','having IRATE(q_double) > 0',
+ 'having MIN(q_int) > 0','having MIN(q_bigint) > 0','having MIN(q_smallint) > 0','having MIN(q_tinyint) > 0','having MIN(q_float) > 0','having MIN(q_double) > 0',
+ 'having MAX(q_int) > 0','having MAX(q_bigint) > 0','having MAX(q_smallint) > 0','having MAX(q_tinyint) > 0','having MAX(q_float) > 0','having MAX(q_double) > 0',
+ 'having FIRST(q_int) > 0','having FIRST(q_bigint) > 0','having FIRST(q_smallint) > 0','having FIRST(q_tinyint) > 0','having FIRST(q_float) > 0','having FIRST(q_double) > 0',
+ 'having LAST(q_int) > 0','having LAST(q_bigint) > 0','having LAST(q_smallint) > 0','having LAST(q_tinyint) > 0','having LAST(q_float) > 0','having LAST(q_double) > 0',
+ 'having APERCENTILE(q_int,10) > 0','having APERCENTILE(q_bigint,10) > 0','having APERCENTILE(q_smallint,10) > 0','having APERCENTILE(q_tinyint,10) > 0','having APERCENTILE(q_float,10) > 0','having APERCENTILE(q_double,10) > 0']
+ having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0',
+ 'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 0','having BOTTOM(q_bool,10) > 0',
+ 'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0',
+ 'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0',
+ 'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0']
+ having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0']
+
+ # limit offset where
+ limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200']
+ limit1_where = ['limit 1 offset 1' , 'limit 1' ]
+ limit_u_where = ['limit 100 offset 10' , 'limit 50' , 'limit 100' , 'limit 10' ]
+
+ # slimit soffset where
+ slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2']
+ slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ]
+
+ # aggregate function include [all:count(*)\avg\sum\stddev ||regular:twa\irate\leastsquares ||group by tbname:twa\irate\]
+ # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile]
+ # calculation function include [all:spread\+-*/ ||regular:diff\derivative ||group by tbname:diff\derivative\]
+ # **_ns_** expressions are not supported on stables, therefore, separated from regular tables
+ # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval
+ # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname
+ # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname
+
+ # calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval
+ # select function include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) ||regular: percentile]
+
+ calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+ 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+ 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+ 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ,
+ 'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+ 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
+ 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
+ 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+ 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
+
+ calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
+ 'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
+ 'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
+ 'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ]
+
+ calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
+ 'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
+ 'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
+ 'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
+ 'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
+
+
+ calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
+
+
+ calc_select_fill = ['INTERP(q_bool)' ,'INTERP(q_binary)' ,'INTERP(q_nchar)' ,'INTERP(q_ts)', 'INTERP(q_int)' ,'INTERP(*)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)']
+ interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\'']
+
+ #two table join
+ calc_select_in_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' ,
+ 'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' ,
+ 'first(t1.q_int)' , 'first(t1.q_bigint)' , 'first(t1.q_smallint)' , 'first(t1.q_tinyint)' , 'first(t1.q_float)' ,'first(t1.q_double)' ,'first(t1.q_binary)' ,'first(t1.q_nchar)' ,'first(t1.q_bool)' ,'first(t1.q_ts)' ,
+ 'last(t1.q_int)' , 'last(t1.q_bigint)' , 'last(t1.q_smallint)' , 'last(t1.q_tinyint)' , 'last(t1.q_float)' ,'last(t1.q_double)' , 'last(t1.q_binary)' ,'last(t1.q_nchar)' ,'last(t1.q_bool)' ,'last(t1.q_ts)' ,
+ 'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' ,
+ 'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' ,
+ 'first(t2.q_int)' , 'first(t2.q_bigint)' , 'first(t2.q_smallint)' , 'first(t2.q_tinyint)' , 'first(t2.q_float)' ,'first(t2.q_double)' ,'first(t2.q_binary)' ,'first(t2.q_nchar)' ,'first(t2.q_bool)' ,'first(t2.q_ts)' ,
+ 'last(t2.q_int)' , 'last(t2.q_bigint)' , 'last(t2.q_smallint)' , 'last(t2.q_tinyint)' , 'last(t2.q_float)' ,'last(t2.q_double)' , 'last(t2.q_binary)' ,'last(t2.q_nchar)' ,'last(t2.q_bool)' ,'last(t2.q_ts)']
+
+ calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' ,
+ 'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' ,
+ 'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' ,
+ 'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' ,
+ 'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' ,
+ 'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' ,
+ 'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' ,
+ 'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' ,
+ 'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' ,
+ 'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)']
+
+ calc_select_all_j = calc_select_in_ts_j + calc_select_in_j
+
+ calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
+ 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
+
+
+ calc_select_fill_j = ['INTERP(t1.q_bool)' ,'INTERP(t1.q_binary)' ,'INTERP(t1.q_nchar)' ,'INTERP(t1.q_ts)', 'INTERP(t1.q_int)' ,'INTERP(t1.*)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' ,
+ 'INTERP(t2.q_bool)' ,'INTERP(t2.q_binary)' ,'INTERP(t2.q_nchar)' ,'INTERP(t2.q_ts)', 'INTERP(t2.q_int)' ,'INTERP(t2.*)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)']
+ interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' ,
+ 't1.tbname in (\'table_1\') and t1.ts = now' ,'t1.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t1.ts = \'2020-09-13 20:26:40.000\'','t1.tbname like \'table%\' and t1.ts = \'2020-09-13 20:26:40.002\'',
+ 't2.tbname in (\'table_1\') and t2.ts = now' ,'t2.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t2.ts = \'2020-09-13 20:26:40.000\'','t2.tbname like \'table%\' and t2.ts = \'2020-09-13 20:26:40.002\'']
+
+ # calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname APERCENTILE\PERCENTILE
+        # aggregate function include [all:count(*)\avg\sum\stddev ||regular:twa\irate\leastsquares ||group by tbname:twa\irate\]
+ calc_aggregate_all = ['count(*)' , 'count(q_int)' ,'count(q_bigint)' , 'count(q_smallint)' ,'count(q_tinyint)' ,'count(q_float)' ,
+ 'count(q_double)' ,'count(q_binary)' ,'count(q_nchar)' ,'count(q_bool)' ,'count(q_ts)' ,
+ 'avg(q_int)' ,'avg(q_bigint)' , 'avg(q_smallint)' ,'avg(q_tinyint)' ,'avg(q_float)' ,'avg(q_double)' ,
+ 'sum(q_int)' ,'sum(q_bigint)' , 'sum(q_smallint)' ,'sum(q_tinyint)' ,'sum(q_float)' ,'sum(q_double)' ,
+ 'STDDEV(q_int)' ,'STDDEV(q_bigint)' , 'STDDEV(q_smallint)' ,'STDDEV(q_tinyint)' ,'STDDEV(q_float)' ,'STDDEV(q_double)',
+ 'APERCENTILE(q_int,10)' ,'APERCENTILE(q_bigint,20)' , 'APERCENTILE(q_smallint,30)' ,'APERCENTILE(q_tinyint,40)' ,'APERCENTILE(q_float,50)' ,'APERCENTILE(q_double,60)']
+
+ calc_aggregate_regular = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
+ 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ,
+ 'LEASTSQUARES(q_int,15,3)' , 'LEASTSQUARES(q_bigint,10,1)' , 'LEASTSQUARES(q_smallint,20,3)' ,'LEASTSQUARES(q_tinyint,10,4)' ,'LEASTSQUARES(q_float,6,4)' ,'LEASTSQUARES(q_double,3,1)' ,
+ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
+
+ calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
+ 'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ]
+
+ #two table join
+ calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' ,
+ 'count(t1.q_double)' ,'count(t1.q_binary)' ,'count(t1.q_nchar)' ,'count(t1.q_bool)' ,'count(t1.q_ts)' ,
+ 'avg(t1.q_int)' ,'avg(t1.q_bigint)' , 'avg(t1.q_smallint)' ,'avg(t1.q_tinyint)' ,'avg(t1.q_float)' ,'avg(t1.q_double)' ,
+ 'sum(t1.q_int)' ,'sum(t1.q_bigint)' , 'sum(t1.q_smallint)' ,'sum(t1.q_tinyint)' ,'sum(t1.q_float)' ,'sum(t1.q_double)' ,
+ 'STDDEV(t1.q_int)' ,'STDDEV(t1.q_bigint)' , 'STDDEV(t1.q_smallint)' ,'STDDEV(t1.q_tinyint)' ,'STDDEV(t1.q_float)' ,'STDDEV(t1.q_double)',
+ 'APERCENTILE(t1.q_int,10)' ,'APERCENTILE(t1.q_bigint,20)' , 'APERCENTILE(t1.q_smallint,30)' ,'APERCENTILE(t1.q_tinyint,40)' ,'APERCENTILE(t1.q_float,50)' ,'APERCENTILE(t1.q_double,60)' ,
+ 'count(t2.*)' , 'count(t2.q_int)' ,'count(t2.q_bigint)' , 'count(t2.q_smallint)' ,'count(t2.q_tinyint)' ,'count(t2.q_float)' ,
+ 'count(t2.q_double)' ,'count(t2.q_binary)' ,'count(t2.q_nchar)' ,'count(t2.q_bool)' ,'count(t2.q_ts)' ,
+ 'avg(t2.q_int)' ,'avg(t2.q_bigint)' , 'avg(t2.q_smallint)' ,'avg(t2.q_tinyint)' ,'avg(t2.q_float)' ,'avg(t2.q_double)' ,
+ 'sum(t2.q_int)' ,'sum(t2.q_bigint)' , 'sum(t2.q_smallint)' ,'sum(t2.q_tinyint)' ,'sum(t2.q_float)' ,'sum(t2.q_double)' ,
+ 'STDDEV(t2.q_int)' ,'STDDEV(t2.q_bigint)' , 'STDDEV(t2.q_smallint)' ,'STDDEV(t2.q_tinyint)' ,'STDDEV(t2.q_float)' ,'STDDEV(t2.q_double)',
+ 'APERCENTILE(t2.q_int,10)' ,'APERCENTILE(t2.q_bigint,20)' , 'APERCENTILE(t2.q_smallint,30)' ,'APERCENTILE(t2.q_tinyint,40)' ,'APERCENTILE(t2.q_float,50)' ,'APERCENTILE(t2.q_double,60)']
+
+ calc_aggregate_regular_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
+ 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
+ 'LEASTSQUARES(t1.q_int,15,3)' , 'LEASTSQUARES(t1.q_bigint,10,1)' , 'LEASTSQUARES(t1.q_smallint,20,3)' ,'LEASTSQUARES(t1.q_tinyint,10,4)' ,'LEASTSQUARES(t1.q_float,6,4)' ,'LEASTSQUARES(t1.q_double,3,1)' ,
+ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
+ 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
+ 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)',
+ 'LEASTSQUARES(t2.q_int,15,3)' , 'LEASTSQUARES(t2.q_bigint,10,1)' , 'LEASTSQUARES(t2.q_smallint,20,3)' ,'LEASTSQUARES(t2.q_tinyint,10,4)' ,'LEASTSQUARES(t2.q_float,6,4)' ,'LEASTSQUARES(t2.q_double,3,1)' ,
+ 'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
+
+ calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
+ 'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
+ 'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
+ 'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ]
+
+ # calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname
+        # calculation function include [all:spread\+-*/ ||regular:diff\derivative ||group by tbname:diff\derivative\]
+ calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' ,
+ '(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))']
+ calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' ,
+ 'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ]
+ calc_calculate_groupbytbname = calc_calculate_regular
+
+ #two table join
+ calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' ,
+ 'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' ,
+ '(SPREAD(t1.q_int) + SPREAD(t1.q_bigint))' , '(SPREAD(t1.q_tinyint) - SPREAD(t1.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_tinyint))',
+ '(SPREAD(t2.q_int) + SPREAD(t2.q_bigint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t2.q_double) * SPREAD(t2.q_tinyint))' , '(SPREAD(t2.q_double) / SPREAD(t2.q_tinyint))',
+ '(SPREAD(t1.q_int) + SPREAD(t1.q_smallint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_float))']
+ calc_calculate_regular_j = ['DIFF(t1.q_int)' ,'DIFF(t1.q_bigint)' , 'DIFF(t1.q_smallint)' ,'DIFF(t1.q_tinyint)' ,'DIFF(t1.q_float)' ,'DIFF(t1.q_double)' ,
+ 'DERIVATIVE(t1.q_int,15s,0)' , 'DERIVATIVE(t1.q_bigint,10s,1)' , 'DERIVATIVE(t1.q_smallint,20s,0)' ,'DERIVATIVE(t1.q_tinyint,10s,1)' ,'DERIVATIVE(t1.q_float,6s,0)' ,'DERIVATIVE(t1.q_double,3s,1)' ,
+ 'DIFF(t2.q_int)' ,'DIFF(t2.q_bigint)' , 'DIFF(t2.q_smallint)' ,'DIFF(t2.q_tinyint)' ,'DIFF(t2.q_float)' ,'DIFF(t2.q_double)' ,
+ 'DERIVATIVE(t2.q_int,15s,0)' , 'DERIVATIVE(t2.q_bigint,10s,1)' , 'DERIVATIVE(t2.q_smallint,20s,0)' ,'DERIVATIVE(t2.q_tinyint,10s,1)' ,'DERIVATIVE(t2.q_float,6s,0)' ,'DERIVATIVE(t2.q_double,3s,1)' ]
+ calc_calculate_groupbytbname_j = calc_calculate_regular_j
+
+
+        #interval && calc_aggregate_all\calc_aggregate_regular\calc_select_all
+ interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ' ,
+ 'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ',
+ 'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ' ,'interval(10a)',
+ 'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)']
+
+
+ for i in range(self.fornum):
+ tdSql.query("select 1-1 from table_0;")
+ sql = "select count(*) from (select count(*) from stable_1 where ts>= 1620000000000 interval(1d) group by tbname) interval(1d);"
+ tdLog.info(sql)
+ tdSql.query(sql)
+ tdSql.checkData(0,0,'2021-08-27 00:00:00.000')
+ tdSql.checkData(0,1,3)
+ tdSql.checkData(1,0,'2021-08-28 00:00:00.000')
+ tdSql.checkData(1,1,3)
+ tdSql.checkData(2,0,'2021-08-29 00:00:00.000')
+ tdSql.checkRows(12)
+
+ #sql = "select * from ( select * from regular_table_1 where q_tinyint >= -127 and q_tinyint <= 127 order by ts );"
+ tdSql.query("select 1-2 from table_0;")
+ sql = "select * from ( select * from regular_table_1 where "
+ sql += "%s );" % random.choice(q_where)
+ datacheck = self.regular1_checkall_0(sql)
+ tdSql.checkRows(100)
+ datacheck = self.regular1_checkall_100(sql)
+
+ #sql = "select * from ( select * from regular_table_1 ) where q_binary like 'binary%' or q_binary = '0' order by ts asc ;"
+ tdSql.query("select 1-3 from table_0;")
+ sql = "select * from ( select * from regular_table_1 ) where "
+ sql += "%s ;" % random.choice(q_where)
+ datacheck = self.regular1_checkall_0(sql)
+ tdSql.checkRows(100)
+ datacheck = self.regular1_checkall_100(sql)
+
+ #sql = select * from ( select * from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and t1.q_double >= -1000000000 and t1.q_double <= 1000000000 and t2.q_double >= -1000000000 and t2.q_double <= 1000000000 order by t2.ts asc );;
+ tdSql.query("select 1-4 from table_0;")
+ sql = "select * from ( select * from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
+ sql += "%s )" % random.choice(q_u_where)
+ datacheck = self.regular_join_checkall_0(sql)
+ tdSql.checkRows(100)
+ datacheck = self.regular_join_checkall_100(sql)
+
+
+
+
+ #sql = "select * from ( select * from stable_1 where q_tinyint >= -127 and q_tinyint <= 127 order by ts );"
+ tdSql.query("select 2-1 from stable_1;")
+ sql = "select * from ( select * from stable_1 where "
+ sql += "%s );" % random.choice(qt_where)
+ datacheck = self.stable1_checkall_0(sql)
+ tdSql.checkRows(300)
+ datacheck = self.stable1_checkall_300(sql)
+
+ #sql = "select * from ( select * from stable_1 ) order by ts asc ;"
+ tdSql.query("select 2-2 from stable_1;")
+ sql = "select * from ( select * from stable_1 ) where "
+ sql += "%s ;" % random.choice(qt_where)
+ datacheck = self.stable1_checkall_0(sql)
+ tdSql.checkRows(300)
+ datacheck = self.stable1_checkall_300(sql)
+
+ #sql = "select * from ( select * from table_0 ) where q_binary like 'binary%' or q_binary = '0' order by ts asc ;"
+ tdSql.query("select 2-3 from stable_1;")
+ sql = "select * from ( select * from table_0 ) where "
+ sql += "%s ;" % random.choice(q_where)
+ datacheck = self.regular1_checkall_0(sql)
+ tdSql.checkRows(100)
+ datacheck = self.regular1_checkall_100(sql)
+
+ #sql = "select * from ( select * from table_0 where q_binary like 'binary%' or q_binary = '0' order by ts asc );"
+ tdSql.query("select 2-4 from stable_1;")
+ sql = "select * from ( select * from table_0 where "
+ sql += "%s );" % random.choice(q_where)
+ datacheck = self.regular1_checkall_0(sql)
+ tdSql.checkRows(100)
+ datacheck = self.regular1_checkall_100(sql)
+
+ #sql = select * from ( select * from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and t1.t_int = t2.t_int ) ;;
+ tdSql.query("select 2-5 from stable_1;")
+ sql = "select * from ( select * from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
+ sql += "%s )" % random.choice(t_join_where)
+ datacheck = self.stable_join_checkall_0(sql)
+ tdSql.checkRows(100)
+ datacheck = self.stable_join_checkall_100(sql)
+
+
+ endTime = time.time()
+ print("total time %ds" % (endTime - startTime))
+
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/pytest/query/operator_cost.py b/tests/pytest/query/operator_cost.py
index f22cfcd4ec709b1d4440065fab398979afeb3adc..e5c8868ad4d54e32e3458ebb02e4f1118d57c6c9 100644
--- a/tests/pytest/query/operator_cost.py
+++ b/tests/pytest/query/operator_cost.py
@@ -25,8 +25,7 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
- now = time.time()
- self.ts = int(round(now * 1000))
+ self.ts = 1633333333000.
self.num = 10
def run(self):
@@ -534,4 +533,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/query.py b/tests/pytest/query/query.py
index ed3740fcb52f8c885bd99d74053ba26a328968a9..8e79fc5f686d77aa276da5bca7d9493ff1a00ffb 100644
--- a/tests/pytest/query/query.py
+++ b/tests/pytest/query/query.py
@@ -74,7 +74,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 'dev_01')
tdSql.checkData(1, 2, 1)
- ## test case for https://jira.taosdata.com:18080/browse/TD-2488
+ ## TD-2488
tdSql.execute("create table m1(ts timestamp, k int) tags(a int)")
tdSql.execute("create table t1 using m1 tags(1)")
tdSql.execute("create table t2 using m1 tags(2)")
@@ -93,7 +93,7 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)
- ## test case for https://jira.taosdata.com:18080/browse/TD-1930
+ ## TD-1930
tdSql.execute("create table tb(ts timestamp, c1 int, c2 binary(10), c3 nchar(10), c4 float, c5 bool)")
for i in range(10):
tdSql.execute(
@@ -126,7 +126,7 @@ class TDTestCase:
tdSql.query("select * from tb where c5 = 'true' ")
tdSql.checkRows(5)
- # For jira: https://jira.taosdata.com:18080/browse/TD-2850
+ # TD-2850
tdSql.execute("create database 'Test' ")
tdSql.execute("use 'Test' ")
tdSql.execute("create table 'TB'(ts timestamp, 'Col1' int) tags('Tag1' int)")
@@ -136,7 +136,7 @@ class TDTestCase:
tdSql.query("select * from tb0")
tdSql.checkRows(1)
- # For jira:https://jira.taosdata.com:18080/browse/TD-6314
+ # TD-6314
tdSql.execute("use db")
tdSql.execute("create stable stb_001(ts timestamp,v int) tags(c0 int)")
tdSql.execute("insert into stb1 using stb_001 tags(1) values(now,1)")
@@ -145,10 +145,26 @@ class TDTestCase:
- #For jira: https://jira.taosdata.com:18080/browse/TD-6387
+ #TD-6387
tdLog.info("case for bug_6387")
self.bug_6387()
+ #JIRA TS-583
+ tdLog.info("case for JIRA TS-583")
+ tdSql.execute("create database test2")
+ tdSql.execute("use test2")
+ tdSql.execute("create table stb(ts timestamp, c1 int) tags(t1 binary(120))")
+ tdSql.execute("create table t0 using stb tags('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')")
+
+ tdSql.query("show create table t0")
+ tdSql.checkRows(1)
+
+ tdSql.execute("create table stb2(ts timestamp, c1 int) tags(t1 nchar(120))")
+ tdSql.execute("create table t1 using stb2 tags('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')")
+
+ tdSql.query("show create table t1")
+ tdSql.checkRows(1)
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/queryDiffColsTagsAndOr.py b/tests/pytest/query/queryDiffColsTagsAndOr.py
new file mode 100644
index 0000000000000000000000000000000000000000..c84b86aa2c97cb2e57b8f04ef1c579f317c7a9a4
--- /dev/null
+++ b/tests/pytest/query/queryDiffColsTagsAndOr.py
@@ -0,0 +1,989 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+from util.common import tdCom
+class TDTestCase:
+ def init(self, conn, logSql):
+ ## add for TD-6672
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def insertData(self, tb_name):
+ insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)',
+ f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2)',
+ f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3)',
+ f'insert into {tb_name} values ("2021-01-09 12:00:00", 1, 2, 4, 3, 1.1, 1.1, "binary", "nchar", true, 4)',
+ f'insert into {tb_name} values ("2021-01-11 12:00:00", 1, 2, 5, 5, 1.1, 1.1, "binary", "nchar", true, 5)',
+ f'insert into {tb_name} values ("2021-01-13 12:00:00", 1, 2, 1, 3, 6.6, 1.1, "binary", "nchar", true, 6)',
+ f'insert into {tb_name} values ("2021-01-15 12:00:00", 1, 2, 1, 3, 1.1, 7.7, "binary", "nchar", true, 7)',
+ f'insert into {tb_name} values ("2021-01-17 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary8", "nchar", true, 8)',
+ f'insert into {tb_name} values ("2021-01-19 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar9", true, 9)',
+ f'insert into {tb_name} values ("2021-01-21 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar", false, 10)',
+ f'insert into {tb_name} values ("2021-01-23 12:00:00", 1, 3, 1, 3, 1.1, 1.1, Null, Null, false, 11)'
+ ]
+ for sql in insert_sql_list:
+ tdSql.execute(sql)
+
+ def initTb(self):
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(
+ f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int)")
+ self.insertData(tb_name)
+ return tb_name
+
+ def initStb(self):
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(8, "letters")
+ tdSql.execute(
+ f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 int)")
+ for i in range(1, 6):
+ tdSql.execute(
+ f'CREATE TABLE {tb_name}_sub_{i} using {tb_name} tags ({i}, {i}, {i}, {i}, {i}.{i}, {i}.{i}, "binary{i}", "nchar{i}", true, {i})')
+ self.insertData(f'{tb_name}_sub_{i}')
+ return tb_name
+
+ def initTwoStb(self):
+ tdCom.cleanTb()
+ tb_name = tdCom.getLongName(8, "letters")
+ tb_name1 = f'{tb_name}1'
+ tb_name2 = f'{tb_name}2'
+ tdSql.execute(
+ f"CREATE TABLE {tb_name1} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 int)")
+ tdSql.execute(
+ f"CREATE TABLE {tb_name2} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 int)")
+ tdSql.execute(
+ f'CREATE TABLE {tb_name1}_sub using {tb_name1} tags (1, 1, 1, 1, 1.1, 1.1, "binary1", "nchar1", true, 1)')
+ tdSql.execute(
+ f'CREATE TABLE {tb_name2}_sub using {tb_name2} tags (1, 1, 1, 1, 1.1, 1.1, "binary1", "nchar1", true, 1)')
+ self.insertData(f'{tb_name1}_sub')
+ self.insertData(f'{tb_name2}_sub')
+ return tb_name
+
+ def queryLastC10(self, query_sql, multi=False):
+ if multi:
+ res = tdSql.query(query_sql.replace('c10', 'last(*)'), True)
+ else:
+ res = tdSql.query(query_sql.replace('*', 'last(*)'), True)
+ return int(res[0][-1])
+
+ def queryTsCol(self, tb_name):
+ # ts and ts
+ query_sql = f'select * from {tb_name} where ts > "2021-01-11 12:00:00" or ts < "2021-01-13 12:00:00"'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and ts <= "2021-01-13 12:00:00"'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 6)
+
+ ## ts or and tinyint col
+ query_sql = f'select * from {tb_name} where ts > "2021-01-11 12:00:00" or c1 = 2'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c1 != 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ ## ts or and smallint col
+ query_sql = f'select * from {tb_name} where ts <> "2021-01-11 12:00:00" or c2 = 10'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c2 <= 1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 1)
+
+ ## ts or and int col
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" or c3 = 4'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## ts or and big col
+ query_sql = f'select * from {tb_name} where ts is Null or c4 = 5'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts is not Null and c4 = 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 3)
+
+ ## ts or and float col
+ query_sql = f'select * from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c5 = 6.6'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c5 = 1.1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## ts or and double col
+ query_sql = f'select * from {tb_name} where ts between "2021-01-17 12:00:00" and "2021-01-23 12:00:00" or c6 = 7.7'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c6 = 1.1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## ts or and binary col
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c7 like "binary_"'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c7 in ("binary")'
+ tdSql.query(query_sql)
+ tdSql.checkRows(5)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ ## ts or and nchar col
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c8 like "nchar%"'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c8 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## ts or and bool col
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" or c9=false'
+ tdSql.error(query_sql)
+
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(5)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 9)
+
+ ## multi cols
+ query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and c1 != 2 and c2 >= 2 and c3 <> 4 and c4 < 4 and c5 > 1 and c6 >= 1.1 and c7 is not Null and c8 = "nchar" and c9=false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 10)
+
+ def queryTsTag(self, tb_name):
+ ## ts and tinyint col
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and t1 != 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(20)
+
+ ## ts and smallint col
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and t2 <= 1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(5)
+
+ ## ts or and int col
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and t3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+
+ ## ts or and big col
+ query_sql = f'select * from {tb_name} where ts is not Null and t4 = 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+
+ ## ts or and float col
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and t5 = 1.1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+
+ ## ts or and double col
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and t6 = 1.1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## ts or and binary col
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and t7 in ("binary1")'
+ tdSql.query(query_sql)
+ tdSql.checkRows(5)
+
+ ## ts or and nchar col
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and t8 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(35)
+
+ ## ts or and bool col
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(35)
+
+ ## multi cols
+ query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and t1 != 2 and t2 >= 2 and t3 <> 4 and t4 < 4 and t5 > 1 and t6 >= 1.1 and t7 is not Null and t8 = "nchar3" and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(10)
+
+ def queryTsColTag(self, tb_name):
+ ## ts and tinyint col tag
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c1 >= 2 and t1 != 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+
+ ## ts and smallint col tag
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c2 >=3 and t2 <= 1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+
+ ## ts or and int col tag
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c3 < 3 and t3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(3)
+
+ ## ts or and big col tag
+ query_sql = f'select * from {tb_name} where ts is not Null and c4 <> 1 and t4 = 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+
+ ## ts or and float col tag
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00" and c5 is not Null and t5 = 1.1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+
+ ## ts or and double col tag
+ query_sql = f'select * from {tb_name} where ts < "2021-01-11 12:00:00"and c6 = 1.1 and t6 = 1.1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## ts or and binary col tag
+ query_sql = f'select * from {tb_name} where ts <= "2021-01-11 12:00:00" and c7 is Null and t7 in ("binary1")'
+ tdSql.query(query_sql)
+ tdSql.checkRows(0)
+
+ ## ts or and nchar col tag
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c8 like "nch%" and t8 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(30)
+
+ ## ts or and bool col tag
+ query_sql = f'select * from {tb_name} where ts >= "2021-01-11 12:00:00" and c9=false and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(10)
+
+ ## multi cols tag
+ query_sql = f'select * from {tb_name} where ts > "2021-01-03 12:00:00" and c1 = 1 and c2 != 3 and c3 <= 2 and c4 >= 2 and c5 in (1.2, 1.1) and c6 < 2.2 and c7 like "bina%" and c8 is not Null and c9 = true and t1 != 2 and t2 >= 2 and t3 <> 4 and t4 < 4 and t5 > 1 and t6 >= 1.1 and t7 is not Null and t8 = "nchar3" and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+
+ def queryFullColType(self, tb_name):
+ ## != or and
+ query_sql = f'select * from {tb_name} where c1 != 1 or c2 = 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(3)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c1 != 1 and c2 = 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 2)
+
+ ## <> or and
+ query_sql = f'select * from {tb_name} where c1 <> 1 or c3 = 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 2)
+
+ query_sql = f'select * from {tb_name} where c1 <> 2 and c3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## >= or and
+ query_sql = f'select * from {tb_name} where c1 >= 2 or c3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ query_sql = f'select * from {tb_name} where c1 >= 2 and c3 = 1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 2)
+
+ ## <= or and
+ query_sql = f'select * from {tb_name} where c1 <= 1 or c3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(10)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c1 <= 1 and c3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 4)
+
+ ## <> or and is Null
+ query_sql = f'select * from {tb_name} where c1 <> 1 or c7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c1 <> 2 and c7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## > or and is not Null
+ query_sql = f'select * from {tb_name} where c2 > 2 or c8 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c2 > 2 and c8 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 3)
+
+ ## > or < or >= or <= or != or <> or = Null
+ query_sql = f'select * from {tb_name} where c1 > 1 or c2 < 2 or c3 >= 4 or c4 <= 2 or c5 != 1.1 or c6 <> 1.1 or c7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(8)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c1 = 1 and c2 > 1 and c3 >= 1 and c4 <= 5 and c5 != 6.6 and c6 <> 7.7 and c7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## tiny small int big or
+ query_sql = f'select * from {tb_name} where c1 = 2 or c2 = 3 or c3 = 4 or c4 = 5'
+ tdSql.query(query_sql)
+ tdSql.checkRows(5)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c1 = 1 and c2 = 2 and c3 = 1 and c4 = 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(5)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 10)
+
+ ## float double binary nchar bool or
+ query_sql = f'select * from {tb_name} where c5=6.6 or c6=7.7 or c7="binary8" or c8="nchar9" or c9=false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(6)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c5=1.1 and c6=7.7 and c7="binary" and c8="nchar" and c9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 7)
+
+ ## all types or
+ query_sql = f'select * from {tb_name} where c1=2 or c2=3 or c3=4 or c4=5 or c5=6.6 or c6=7.7 or c7 nmatch "binary[134]" or c8="nchar9" or c9=false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where c1=1 and c2=2 and c3=1 and c4=3 and c5=1.1 and c6=1.1 and c7 match "binary[28]" and c8 in ("nchar") and c9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 8)
+
+ query_sql = f'select * from {tb_name} where c1=1 and c2=2 or c3=1 and c4=3 and c5=1.1 and c6=1.1 and c7 match "binary[28]" and c8 in ("nchar") and c9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(7)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 10)
+
+ def queryFullTagType(self, tb_name):
+ ## != or and
+ query_sql = f'select * from {tb_name} where t1 != 1 or t2 = 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 != 1 and t2 = 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## <> or and
+ query_sql = f'select * from {tb_name} where t1 <> 1 or t3 = 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 <> 2 and t3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## >= or and
+ query_sql = f'select * from {tb_name} where t1 >= 2 or t3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 >= 1 and t3 = 1'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## <= or and
+ query_sql = f'select * from {tb_name} where t1 <= 1 or t3 = 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(22)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 <= 3 and t3 = 2'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## <> or and is Null
+ query_sql = f'select * from {tb_name} where t1 <> 1 or t7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 <> 2 and t7 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## > or and is not Null
+ query_sql = f'select * from {tb_name} where t2 > 2 or t8 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(55)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t2 > 2 and t8 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(33)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## > or < or >= or <= or != or <> or = Null
+ query_sql = f'select * from {tb_name} where t1 > 1 or t2 < 2 or t3 >= 4 or t4 <= 2 or t5 != 1.1 or t6 <> 1.1 or t7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(55)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 >= 1 and t2 > 1 and t3 >= 1 and t4 <= 5 and t5 != 6.6 and t6 <> 7.7 and t7 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## tiny small int big or and
+ query_sql = f'select * from {tb_name} where t1 = 2 or t2 = 3 or t3 = 4 or t4 = 5'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1 = 1 and t2 = 2 and t3 = 1 and t4 = 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(0)
+
+ ## float double binary nchar bool or and
+ query_sql = f'select * from {tb_name} where t5=2.2 or t6=7.7 or t7="binary8" or t8="nchar9" or t9=false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t5=2.2 and t6=2.2 and t7="binary2" and t8="nchar2" and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## all types or and
+ query_sql = f'select * from {tb_name} where t1=2 or t2=3 or t3=4 or t4=5 or t5=6.6 or t6=7.7 or t7 nmatch "binary[134]" or t8="nchar9" or t9=false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1=1 and t2=1 and t3>=1 and t4!=2 and t5=1.1 and t6=1.1 and t7 match "binary[18]" and t8 in ("nchar1") and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ query_sql = f'select * from {tb_name} where t1=1 and t2=1 or t3>=1 and t4!=2 and t5=1.1 and t6=1.1 and t7 match "binary[18]" and t8 in ("nchar1") and t9=true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ def queryColMultiExpression(self, tb_name):
+ ## condition_A and condition_B or condition_C (> < >=)
+ query_sql = f'select * from {tb_name} where c1 > 2 and c2 < 4 or c3 >= 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ ## (condition_A and condition_B) or condition_C (<= != <>)
+ query_sql = f'select * from {tb_name} where (c1 <= 1 and c2 != 2) or c4 <> 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## condition_A and (condition_B or condition_C) (Null not Null)
+ query_sql = f'select * from {tb_name} where c1 is not Null and (c6 = 7.7 or c8 is Null)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## condition_A or condition_B and condition_C (> < >=)
+ query_sql = f'select * from {tb_name} where c1 > 2 or c2 < 4 and c3 >= 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ ## (condition_A or condition_B) and condition_C (<= != <>)
+ query_sql = f'select * from {tb_name} where (c1 <= 1 or c2 != 2) and c4 <> 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ ## condition_A or (condition_B and condition_C) (Null not Null)
+ query_sql = f'select * from {tb_name} where c6 >= 7.7 or (c1 is not Null and c3 =5)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 7)
+
+ ## condition_A or (condition_B and condition_C) or condition_D (> != < Null)
+ query_sql = f'select * from {tb_name} where c1 != 1 or (c2 >2 and c3 < 1) or c7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## condition_A and (condition_B or condition_C) and condition_D (>= = <= not Null)
+ query_sql = f'select * from {tb_name} where c4 >= 4 and (c1 = 2 or c5 <= 1.1) and c7 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ ## (condition_A and condition_B) or (condition_C or condition_D) (Null >= > =)
+ query_sql = f'select * from {tb_name} where (c8 is Null and c1 >= 1) or (c3 > 3 or c4 =2)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 11)
+
+ ## (condition_A or condition_B) or condition_C or (condition_D and condition_E) (>= <= = not Null <>)
+ query_sql = f'select * from {tb_name} where (c1 >= 2 or c2 <= 1) or c3 = 4 or (c7 is not Null and c6 <> 1.1)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(4)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 7)
+
+ ## condition_A or (condition_B and condition_C) or (condition_D and condition_E) and condition_F
+ query_sql = f'select * from {tb_name} where c1 != 1 or (c2 <= 1 and c3 <4) or (c3 >= 4 or c7 is not Null) and c9 <> true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(3)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 10)
+
+ ## (condition_A or (condition_B and condition_C) or (condition_D and condition_E)) and condition_F
+ query_sql = f'select * from {tb_name} where (c1 != 1 or (c2 <= 2 and c3 >= 4) or (c3 >= 4 or c7 is not Null)) and c9 != false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(9)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 9)
+
+ ## (condition_A or condition_B) or (condition_C or condition_D) and (condition_E or condition_F or condition_G)
+ query_sql = f'select * from {tb_name} where c1 != 1 or (c2 <= 3 and c3 > 4) and c3 <= 5 and (c7 is not Null and c9 != false)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(self.queryLastC10(query_sql), 5)
+
+ def queryTagMultiExpression(self, tb_name):
+ ## condition_A and condition_B or condition_C (> < >=)
+ query_sql = f'select * from {tb_name} where t1 > 2 and t2 < 4 or t3 >= 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(33)
+
+ ## (condition_A and condition_B) or condition_C (<= != <>)
+ query_sql = f'select * from {tb_name} where (t1 <= 1 and t2 != 2) or t4 <> 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+
+ ## condition_A and (condition_B or condition_C) (Null not Null)
+ query_sql = f'select * from {tb_name} where t1 is not Null and (t6 = 7.7 or t8 is not Null)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(55)
+
+ ## condition_A or condition_B and condition_C (> < >=)
+ query_sql = f'select * from {tb_name} where t1 > 2 or t2 < 4 and t3 >= 4'
+ tdSql.query(query_sql)
+ tdSql.checkRows(33)
+
+ ## (condition_A or condition_B) and condition_C (<= != <>)
+ query_sql = f'select * from {tb_name} where (t1 <= 1 or t2 != 2) and t4 <> 3'
+ tdSql.query(query_sql)
+ tdSql.checkRows(33)
+
+ ## condition_A or (condition_B and condition_C) (Null not Null)
+ query_sql = f'select * from {tb_name} where t6 >= 7.7 or (t1 is not Null and t3 =5)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+
+ ## condition_A or (condition_B and condition_C) or condition_D (> != < Null)
+ query_sql = f'select * from {tb_name} where t1 != 1 or (t2 >2 and t3 < 1) or t7 is Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+
+ ## condition_A and (condition_B or condition_C) and condition_D (>= = <= not Null)
+ query_sql = f'select * from {tb_name} where t4 >= 2 and (t1 = 2 or t5 <= 1.1) and t7 is not Null'
+ tdSql.query(query_sql)
+ tdSql.checkRows(11)
+
+ ## (condition_A and condition_B) or (condition_C or condition_D) (Null >= > =)
+ query_sql = f'select * from {tb_name} where (t8 is Null and t1 >= 1) or (t3 > 3 or t4 =2)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(33)
+
+ ## (condition_A or condition_B) or condition_C or (condition_D and condition_E) (>= <= = not Null <>)
+ query_sql = f'select * from {tb_name} where (t1 >= 2 or t2 <= 1) or t3 = 4 or (t7 is not Null and t6 <> 1.1)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(55)
+
+ ## condition_A or (condition_B and condition_C) or (condition_D and condition_E) and condition_F
+ query_sql = f'select * from {tb_name} where t1 != 1 or (t2 <= 1 and t3 <4) or (t3 >= 4 or t7 is not Null) and t9 <> true'
+ tdSql.query(query_sql)
+ tdSql.checkRows(55)
+
+ ## (condition_A or (condition_B and condition_C) or (condition_D and condition_E)) and condition_F
+ query_sql = f'select * from {tb_name} where (t1 != 1 or (t2 <= 2 and t3 >= 4) or (t3 >= 4 or t7 is not Null)) and t9 != false'
+ tdSql.query(query_sql)
+ tdSql.checkRows(55)
+
+ ## (condition_A or condition_B) or (condition_C or condition_D) and (condition_E or condition_F or condition_G)
+ query_sql = f'select * from {tb_name} where t1 != 1 or (t2 <= 3 and t3 > 4) and t3 <= 5 and (t7 is not Null and t9 != false)'
+ tdSql.query(query_sql)
+ tdSql.checkRows(44)
+
+ def queryColPreCal(self, tb_name):
+ ## avg sum condition_A or/and condition_B
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where c10 = 5 or c8 is Null'
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[0]), 3)
+ tdSql.checkEqual(int(res[1]), 6)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null'
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[1]), 16)
+
+ ## avg sum condition_A or/and condition_B or/and condition_C
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where c10 = 4 or c8 is Null or c9 = false '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[0]), 2)
+ tdSql.checkEqual(int(res[1]), 6)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null and c9 = false '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[0]), 1)
+ tdSql.checkEqual(int(res[1]), 1)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null or c9 = false '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[1]), 17)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where c6 = 1.1 or c8 is not Null and c9 = false '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[1]), 17)
+
+ ## count avg sum condition_A or/and condition_B or/and condition_C interval
+ query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where c10 = 4 or c8 is Null or c9 = false interval(16d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 1)
+ tdSql.checkEqual(int(res[0][2]), 4)
+ tdSql.checkEqual(int(res[0][3]), 4)
+ tdSql.checkEqual(int(res[1][1]), 2)
+ tdSql.checkEqual(int(res[1][2]), 1)
+ tdSql.checkEqual(int(res[1][3]), 2)
+ query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where c6 = 1.1 and c8 is not Null and c9 = false interval(16d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(1)
+ tdSql.checkEqual(int(res[0][1]), 1)
+ tdSql.checkEqual(int(res[0][2]), 1)
+ tdSql.checkEqual(int(res[0][3]), 1)
+
+ ## count avg sum condition_A or condition_B or in and like or condition_C interval
+ query_sql = f'select count(*), sum(c3) from {tb_name} where c10 = 4 or c8 is Null or c2 in (1, 2) and c7 like "binary_" or c1 <> 1 interval(16d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 2)
+ tdSql.checkEqual(int(res[0][2]), 5)
+ tdSql.checkEqual(int(res[1][1]), 2)
+ tdSql.checkEqual(int(res[1][2]), 2)
+
+ def queryTagPreCal(self, tb_name):
+ ## avg sum condition_A or/and condition_B
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where t10 = 5 or t8 is Null'
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[0]), 1)
+ tdSql.checkEqual(int(res[1]), 18)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null'
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[1]), 18)
+
+ ## avg sum condition_A or/and condition_B or/and condition_C
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where t10 = 4 or t8 is Null or t9 = true '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[0]), 1)
+ tdSql.checkEqual(int(res[1]), 90)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null and t9 = true '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[0]), 1)
+ tdSql.checkEqual(int(res[1]), 18)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null or t9 = true '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[1]), 90)
+ query_sql = f'select avg(c3), sum(c3) from {tb_name} where t6 = 1.1 or t8 is not Null and t9 = true '
+ res = tdSql.query(query_sql, True)[0]
+ tdSql.checkEqual(int(res[1]), 90)
+
+ ## count avg sum condition_A or/and condition_B or/and condition_C interval
+ query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where t10 = 4 or t8 is Null or t9 = true interval(16d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 25)
+ tdSql.checkEqual(int(res[0][2]), 2)
+ tdSql.checkEqual(int(res[0][3]), 60)
+ tdSql.checkEqual(int(res[1][1]), 30)
+ tdSql.checkEqual(int(res[1][2]), 1)
+ tdSql.checkEqual(int(res[1][3]), 30)
+ query_sql = f'select count(*), avg(c3), sum(c3) from {tb_name} where t6 = 1.1 and t8 is not Null and t9 = true interval(16d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 5)
+ tdSql.checkEqual(int(res[0][2]), 2)
+ tdSql.checkEqual(int(res[0][3]), 12)
+ tdSql.checkEqual(int(res[1][1]), 6)
+ tdSql.checkEqual(int(res[1][2]), 1)
+ tdSql.checkEqual(int(res[1][3]), 6)
+
+ ## count avg sum condition_A or condition_B or in and like or condition_C interval
+ query_sql = f'select count(*), sum(c3) from {tb_name} where t10 = 4 or t8 is Null or t2 in (1, 2) and t7 like "binary_" or t1 <> 1 interval(16d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 25)
+ tdSql.checkEqual(int(res[0][2]), 60)
+ tdSql.checkEqual(int(res[1][1]), 30)
+ tdSql.checkEqual(int(res[1][2]), 30)
+
+ def queryMultiTb(self, tb_name):
+ ## select from (condition_A or condition_B)
+ query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 >=3)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(3)
+ tdSql.checkEqual(int(res[2][0]), 11)
+
+ ## select from (condition_A or condition_B) where condition_A or condition_B
+ query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 >=3) where c1 =2 or c4 = 2'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[1][0]), 3)
+
+ ## select from (condition_A or condition_B and like and in) where condition_A or condition_B or like and in
+ query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(7)
+ tdSql.checkEqual(int(res[6][0]), 10)
+
+ ## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or like and in interval
+ query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or c8 like "ncha_" and c9 in (true) interval(8d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(3)
+ tdSql.checkEqual(int(res[0][1]), 3)
+ tdSql.checkEqual(int(res[0][2]), 1)
+ tdSql.checkEqual(int(res[0][3]), 10)
+ tdSql.checkEqual(int(res[1][1]), 3)
+ tdSql.checkEqual(int(res[1][2]), 3)
+ tdSql.checkEqual(int(res[1][3]), 3)
+ tdSql.checkEqual(int(res[2][1]), 1)
+ tdSql.checkEqual(int(res[2][2]), 1)
+ tdSql.checkEqual(int(res[2][3]), 1)
+
+ ## cname
+ query_sql = f'select c10 from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) a where a.c1 != 2 or a.c3 = 1 or a.c8 like "ncha_" and a.c9 in (true)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(7)
+ tdSql.checkEqual(int(res[6][0]), 10)
+
+ ## multi cname
+ query_sql = f'select b.c10 from (select * from {tb_name} where c9 = true or c2 = 2) a, (select * from {tb_name} where c7 like "binar_" or c4 in (3, 5)) b where a.ts = b.ts'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(10)
+ tdSql.checkEqual(int(res[9][0]), 10)
+
+ def queryMultiTbWithTag(self, tb_name):
+ ## select count avg sum from (condition_A or condition_B and like and in) where condition_A or condition_B or condition_tag_C or condition_tag_D or like and in interval
+ query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >1 or c2 = 2 and c7 like "binar_" and c4 in (3, 5)) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(3)
+ tdSql.checkEqual(int(res[0][1]), 17)
+ tdSql.checkEqual(int(res[0][2]), 1)
+ tdSql.checkEqual(int(res[0][3]), 38)
+ tdSql.checkEqual(int(res[1][1]), 10)
+ tdSql.checkEqual(int(res[1][2]), 2)
+ tdSql.checkEqual(int(res[1][3]), 17)
+ tdSql.checkEqual(int(res[2][1]), 8)
+ tdSql.checkEqual(int(res[2][2]), 1)
+ tdSql.checkEqual(int(res[2][3]), 15)
+
+ ## select count avg sum from (condition_A and condition_B and like and in and ts and condition_tag_A and condition_tag_B and between) where condition_C or condition_D or condition_tag_C or condition_tag_D or like and in interval
+ query_sql = f'select count(*), avg(c6), sum(c3) from (select * from {tb_name} where c1 >= 1 and c2 = 2 and c7 like "binar_" and c4 in (3, 5) and ts > "2021-01-11 12:00:00" and t1 < 2 and t1 > 0 and c6 between 0 and 7) where c1 != 2 or c3 = 1 or t1=2 or t1=3 or c8 like "ncha_" and c9 in (true) interval(8d)'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(int(res[0][1]), 2)
+ tdSql.checkEqual(int(res[0][2]), 1)
+ tdSql.checkEqual(int(res[0][3]), 2)
+ tdSql.checkEqual(int(res[1][1]), 1)
+ tdSql.checkEqual(int(res[1][2]), 1)
+ tdSql.checkEqual(int(res[1][3]), 1)
+
+ def queryJoin(self, tb_name):
+ ## between tss tag
+ query_sql = f'select stb1.ts, stb2.ts, stb1.t1, stb1.c10 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.ts <= "2021-01-07 12:00:00" and stb2.ts < "2021-01-07 12:00:00" and stb1.t1 = stb2.t1'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ tdSql.checkEqual(str(res[0][0]), "2021-01-01 12:00:00")
+ tdSql.checkEqual(str(res[1][1]), "2021-01-05 12:00:00")
+ ## between ts tag col
+ query_sql = f'select stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb2.c2 <= 2 and stb1.c1 > 0'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(9)
+ ## between ts tags
+ query_sql = f'select stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb1.t1 = 1 '
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(11)
+ ## between ts tag tbnames
+ query_sql = f'select stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb1.tbname is not Null'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(11)
+ ## between ts col tag tbname
+ query_sql = f'select stb1.tbname, stb1.t1, stb2.t1, stb1.c1, stb2.c2 from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and stb1.tbname is not Null and stb1.c2 = 3'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+ query_sql = f'select stb1.tbname, stb1.*, stb2.tbname, stb1.* from {tb_name}1 stb1, {tb_name}2 stb2 where stb1.ts = stb2.ts and stb1.t1 = stb2.t1 and (stb1.t2 != 1 or stb1.t3 <= 1) and (stb2.tbname like "{tb_name}%" or stb2.tbname is Null ) and stb1.tbname is not Null and stb2.c2 = 3'
+ res = tdSql.query(query_sql, True)
+ tdSql.checkRows(2)
+
+ def checkTbColTypeOperator(self):
+ '''
+ Ordinary table full column type and operator
+ '''
+ tb_name = self.initTb()
+ self.queryFullColType(tb_name)
+
+ def checkStbColTypeOperator(self):
+ '''
+ Super table full column type and operator
+ '''
+ tb_name = self.initStb()
+ self.queryFullColType(f'{tb_name}_sub_1')
+
+
+ def checkStbTagTypeOperator(self):
+ '''
+ Super table full tag type and operator
+ '''
+ tb_name = self.initStb()
+ self.queryFullTagType(tb_name)
+
+ def checkTbTsCol(self):
+ '''
+ Ordinary table ts and col check
+ '''
+ tb_name = self.initTb()
+ self.queryTsCol(tb_name)
+
+ def checkStbTsTol(self):
+ tb_name = self.initStb()
+ self.queryTsCol(f'{tb_name}_sub_1')
+
+ def checkStbTsTag(self):
+ tb_name = self.initStb()
+ self.queryTsTag(tb_name)
+
+ def checkStbTsColTag(self):
+ tb_name = self.initStb()
+ self.queryTsColTag(tb_name)
+
+ def checkTbMultiExpression(self):
+ '''
+ Ordinary table multiExpression
+ '''
+ tb_name = self.initTb()
+ self.queryColMultiExpression(tb_name)
+
+ def checkStbMultiExpression(self):
+ '''
+ Super table multiExpression
+ '''
+ tb_name = self.initStb()
+ self.queryColMultiExpression(f'{tb_name}_sub_1')
+ self.queryTagMultiExpression(tb_name)
+
+ def checkTbPreCal(self):
+ '''
+ Ordinary table precal
+ '''
+ tb_name = self.initTb()
+ self.queryColPreCal(tb_name)
+
+ def checkStbPreCal(self):
+ '''
+ Super table precal
+ '''
+ tb_name = self.initStb()
+ self.queryColPreCal(f'{tb_name}_sub_1')
+ self.queryTagPreCal(tb_name)
+
+ def checkMultiTb(self):
+ '''
+ test "or" in multi ordinary table
+ '''
+ tb_name = self.initTb()
+ self.queryMultiTb(tb_name)
+
+ def checkMultiStb(self):
+ '''
+ test "or" in multi super table
+ '''
+ tb_name = self.initStb()
+ self.queryMultiTb(f'{tb_name}_sub_1')
+
+ def checkMultiTbWithTag(self):
+ '''
+ test Multi tb with tag
+ '''
+ tb_name = self.initStb()
+ self.queryMultiTbWithTag(tb_name)
+
+ def checkMultiStbJoin(self):
+ '''
+ join test
+ '''
+ tb_name = self.initTwoStb()
+ self.queryJoin(tb_name)
+
+ def run(self):
+ tdSql.prepare()
+ self.checkTbColTypeOperator()
+ self.checkStbColTypeOperator()
+ self.checkStbTagTypeOperator()
+ self.checkTbTsCol()
+ self.checkStbTsTol()
+ self.checkStbTsTag()
+ self.checkStbTsColTag()
+ self.checkTbMultiExpression()
+ self.checkStbMultiExpression()
+ self.checkTbPreCal()
+ self.checkStbPreCal()
+ self.checkMultiTb()
+ self.checkMultiStb()
+ self.checkMultiTbWithTag()
+ self.checkMultiStbJoin()
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/queryRegex.py b/tests/pytest/query/queryRegex.py
index c955920bfd553f9b9d48b2e8f0730a361afdc8df..9edc1db60d5b406b765108bb4ed96c4cda017664 100644
--- a/tests/pytest/query/queryRegex.py
+++ b/tests/pytest/query/queryRegex.py
@@ -29,18 +29,18 @@ class TDTestCase:
print("==============step1")
##2021-09-17 For jira: https://jira.taosdata.com:18080/browse/TD-6585
tdSql.execute(
- "create stable if not exists stb_test(ts timestamp,c0 binary(32),c1 int) tags(t0 binary(32))"
+ "create stable if not exists stb_test(ts timestamp,c0 binary(32),c1 int,c2 nchar(50)) tags(t0 binary(32),t1 nchar(50))"
)
tdSql.execute(
- 'create table if not exists stb_1 using stb_test tags("abcdefgasdfg12346")'
+ 'create table if not exists stb_1 using stb_test tags("abcdefgasdfg12346","涛思数据")'
)
- tdLog.info('insert into stb_1 values("2021-09-13 10:00:00.001","abcefdasdqwerxasdazx12345",15')
+ tdLog.info('insert into stb_1 values("2021-09-13 10:00:00.001","abcefdasdqwerxasdazx12345",15,"引擎一组"')
- tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.002","abcefdasdqwerxasdazx12345",15)')
- tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.003","aaaaafffwwqqxzz",16)')
- tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.004","fffwwqqxzz",17)')
- tdSql.execute('insert into stb_1 values("2020-10-13 10:00:00.001","abcd\\\efgh",100)')
+ tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.002","abcefdasdqwerxasdazx12345",15,"引擎一组一号")')
+ tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.003","aaaaafffwwqqxzz",16,"引擎一组二号")')
+ tdSql.execute('insert into stb_1 values("2021-09-13 10:00:00.004","fffwwqqxzz",17,"涛涛思思")')
+ tdSql.execute('insert into stb_1 values("2020-10-13 10:00:00.001","abcd\\\efgh",100,"思涛思")')
tdSql.query('select * from stb_test where tbname match "asd"')
tdSql.checkRows(0)
@@ -98,6 +98,31 @@ class TDTestCase:
tdSql.query("select * from stb_1 where c0 nmatch '\\\\'")
tdSql.checkRows(3)
+ #2021-10-20 for https://jira.taosdata.com:18080/browse/TD-10708
+ tdSql.query('select * from stb_1 where c2 match "^涛"')
+ tdSql.checkRows(1)
+
+ tdSql.query('select * from stb_1 where c2 nmatch "^涛"')
+ tdSql.checkRows(3)
+
+ tdSql.query('select * from stb_1 where c2 match "号$"')
+ tdSql.checkRows(2)
+
+ tdSql.query('select * from stb_1 where c2 nmatch "号$"')
+ tdSql.checkRows(2)
+
+ tdSql.query('select * from stb_1 where c2 match "涛+思"')
+ tdSql.checkRows(2)
+
+ tdSql.query('select * from stb_1 where c2 nmatch "涛+思"')
+ tdSql.checkRows(2)
+
+ tdSql.query('select * from stb_1 where c2 match "涛*思"')
+ tdSql.checkRows(2)
+
+ tdSql.query('select * from stb_1 where c2 nmatch "涛*思"')
+ tdSql.checkRows(2)
+
diff --git a/tests/pytest/query/schemalessQueryCrash.py b/tests/pytest/query/schemalessQueryCrash.py
new file mode 100644
index 0000000000000000000000000000000000000000..325bf7e5daa168b864c99caf3eb423155b25f7d5
--- /dev/null
+++ b/tests/pytest/query/schemalessQueryCrash.py
@@ -0,0 +1,22 @@
+###################################################################
+# Copyright (c) 2021 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+import time
+import taos
+conn = taos.connect()
+stb_name = "test_crash"
+conn.execute("use test")
+conn.execute(f"select * from {stb_name}")
+time.sleep(4)
+conn.execute(f"select * from {stb_name}")
diff --git a/tests/pytest/query/unionAllTest.py b/tests/pytest/query/unionAllTest.py
index cbff89fd9576b984887c832a51d47a577e89bea4..d1e1bf4d3e11191be2875c464150793936acd065 100644
--- a/tests/pytest/query/unionAllTest.py
+++ b/tests/pytest/query/unionAllTest.py
@@ -103,7 +103,6 @@ class TDTestCase:
select count(*) as count, loc from st where ts between 1600000000000 and 1600000000010 group by loc''')
tdSql.checkRows(6)
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/tag_lite/json_tag_extra.py b/tests/pytest/tag_lite/json_tag_extra.py
new file mode 100644
index 0000000000000000000000000000000000000000..40ee69d46b770a33a8255783f675d5071513bc28
--- /dev/null
+++ b/tests/pytest/tag_lite/json_tag_extra.py
@@ -0,0 +1,375 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+import time
+import random
+
+class TDTestCase:
+
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1 tag format =======")
+ tdLog.info("create database two stables and ")
+ tdSql.execute("create database db_json_tag_test")
+ tdSql.execute("use db_json_tag_test")
+ # test tag format
+ tdSql.execute("create table if not exists jsons1(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(128))")
+ tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(64),jtag1 json(100))")
+ tdSql.error("create table if not exists jsons1(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(64),dataBool bool)")
+
+ tdSql.execute("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":\"fff\",\"id\":5}')")
+ tdSql.execute("use db_json_tag_test")
+
+
+ # two stables: jsons1 jsons2 ,test tag's value and key
+ tdSql.execute("insert into jsons1_1(ts,dataInt) using jsons1 tags('{\"loc+\":\"fff\",\"id\":5}') values (now,12)")
+
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{oc:\"fff\",\"id\":5}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":fff,\"id\":5}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('3333')")
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('{\"loc\":bool)")
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags(true)")
+ tdSql.error("CREATE TABLE if not exists jsons1_1 using jsons1 tags('[{\"num\":5}]')")
+
+ # test max object and key length: max key length is 256; max object length is 4096 (strings built by repeating 'abcd').
+ tdSql.execute("create table if not exists jsons4(ts timestamp, dataInt int, dataStr nchar(50)) tags(jtag json(128))")
+
+ char1= ''.join(['abcd']*64)
+ char2=''.join(char1)
+ char3= ''.join(['abcd']*1022)
+ print(len(char3)) # 4088
+ tdSql.execute("CREATE TABLE if not exists jsons4_1 using jsons4 tags('{\"%s\":5}')" % char1) # len(key)=256
+ tdSql.error("CREATE TABLE if not exists jsons4_1 using jsons4 tags('{\"%s1\":5}')" % char2) # len(key)=257
+ tdSql.execute("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"T\":\"%s\"}')" % char3) # len(object)=4096
+ tdSql.error("CREATE TABLE if not exists jsons4_2 using jsons4 tags('{\"TS\":\"%s\"}')" % char3) # len(object)=4097
+
+ tdSql.execute("insert into jsons1_1 values(now, 1, 'json1')")
+ tdSql.execute("insert into jsons1_1 values(now+1s, 1, 'json1')")
+ tdSql.execute("insert into jsons1_2 using jsons1 tags('{\"num\":5,\"location\":\"beijing\"}') values (now, 1, 'json2')")
+ tdSql.execute("insert into jsons1_3 using jsons1 tags('{\"num\":34,\"location\":\"beijing\",\"level\":\"l1\"}') values (now, 1, 'json3')")
+ tdSql.execute("insert into jsons1_4 using jsons1 tags('{\"class\":55,\"location\":\"beijing\",\"name\":\"name4\"}') values (now, 1, 'json4')")
+
+ # test: json's value is null or empty
+ tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt2 int, dataStr2 nchar(50)) tags(jtag2 json(300))")
+ tdSql.execute("CREATE TABLE if not exists jsons2_1 using jsons2 tags('{}')")
+ tdSql.query("select jtag2 from jsons2_1")
+ tdSql.checkData(0, 0, None)
+ tdSql.execute("CREATE TABLE if not exists jsons2_2 using jsons2 tags('')")
+ tdSql.query("select jtag2 from jsons2_2")
+ tdSql.checkData(0, 0, None)
+ tdSql.execute("CREATE TABLE if not exists jsons2_3 using jsons2 tags('null')")
+ tdSql.query("select jtag2 from jsons2_3")
+ tdSql.checkData(0, 0, None)
+ tdSql.execute("CREATE TABLE if not exists jsons2_4 using jsons2 tags('\t')")
+ tdSql.query("select jtag2 from jsons2_4")
+ tdSql.checkData(0, 0, None)
+ tdSql.execute("CREATE TABLE if not exists jsons2_5 using jsons2 tags(' ')")
+ tdSql.query("select jtag2 from jsons2_5")
+ tdSql.checkData(0, 0, None)
+ tdSql.execute("CREATE TABLE if not exists jsons2_6 using jsons2 tags('{\"nv\":null,\"tea\":true,\"\":false,\"\":123,\"tea\":false}')")
+ tdSql.query("select jtag2 from jsons2_6")
+ tdSql.checkData(0, 0, "{\"tea\":true}")
+ tdSql.error("CREATE TABLE if not exists jsons2_7 using jsons2 tags('{\"nv\":null,\"tea\":123,\"\":false,\"\":123,\"tea\":false}')")
+ tdSql.execute("CREATE TABLE if not exists jsons2_7 using jsons2 tags('{\"test7\":\"\"}')")
+ tdSql.query("select jtag2 from jsons2_7")
+ tdSql.checkData(0, 0, "{\"test7\":\"\"}")
+
+ print("==============step2 alter json table==")
+ tdLog.info("alter stable add tag")
+ tdSql.error("ALTER STABLE jsons2 add tag jtag3 nchar(20)")
+ tdSql.error("ALTER STABLE jsons2 drop tag jtag2")
+ tdSql.execute("ALTER STABLE jsons2 change tag jtag2 jtag3")
+ tdSql.query("select jtag3 from jsons2_6")
+ tdSql.checkData(0, 0, "{\"tea\":true}")
+ tdSql.error("ALTER TABLE jsons2_6 SET TAG jtag3='{\"tea-=[].;!@#$%^&*()/\":}'")
+ tdSql.execute("ALTER TABLE jsons2_6 SET TAG jtag3='{\"tea-=[].;!@#$%^&*()/\":false}'")
+ tdSql.query("select jtag3 from jsons2_6")
+ tdSql.checkData(0, 0, "{\"tea-=[].;!@#$%^&*()/\":false}")
+ tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"sex\":\"femail\",\"age\":35}'")
+ tdSql.query("select jtag from jsons1_1")
+ tdSql.checkData(0, 0, "{\"sex\":\"femail\",\"age\":35}")
+
+
+
+ print("==============step3")
+ tdLog.info("select table")
+
+ tdSql.query("select jtag from jsons1_1")
+ tdSql.checkData(0, 0, "{\"sex\":\"femail\",\"age\":35}")
+
+ tdSql.query("select jtag from jsons1 where jtag->'name'='name4'")
+ tdSql.checkData(0, 0, "{\"class\":55,\"location\":\"beijing\",\"name\":\"name4\"}")
+
+
+ tdSql.query("select * from jsons1")
+ tdSql.checkRows(6)
+
+ tdSql.query("select * from jsons1_1")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from jsons1 where jtag->'location'='beijing'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select jtag->'location' from jsons1_2")
+ tdSql.checkData(0, 0, "beijing")
+
+
+ tdSql.query("select jtag->'num' from jsons1 where jtag->'level'='l1'")
+ tdSql.checkData(0, 0, 34)
+
+ tdSql.query("select jtag->'location' from jsons1")
+ tdSql.checkRows(4)
+
+ tdSql.query("select jtag from jsons1_1")
+ tdSql.checkRows(1)
+
+ tdSql.query("select * from jsons1 where jtag?'sex' or jtag?'num'")
+ tdSql.checkRows(5)
+
+ tdSql.query("select * from jsons1 where jtag?'sex' and jtag?'num'")
+ tdSql.checkRows(0)
+
+ tdSql.query("select jtag->'sex' from jsons1 where jtag?'sex' or jtag?'num'")
+ tdSql.checkData(0, 0, "femail")
+ tdSql.checkRows(3)
+
+ tdSql.query("select *,tbname from jsons1 where jtag->'location'='beijing'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select *,tbname from jsons1 where jtag->'num'=5 or jtag?'sex'")
+ tdSql.checkRows(4)
+
+ # test with tbname
+ tdSql.query("select * from jsons1 where tbname = 'jsons1_1'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from jsons1 where tbname = 'jsons1_1' or jtag?'num'")
+ tdSql.checkRows(5)
+
+ tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag?'num'")
+ tdSql.checkRows(0)
+
+ tdSql.query("select * from jsons1 where tbname = 'jsons1_1' or jtag->'num'=5")
+ tdSql.checkRows(4)
+
+ # test where condition like
+ tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%' and jtag->'location'='beijin'")
+ tdSql.checkRows(0)
+
+ tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%' or jtag->'location'='beijin'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select *,tbname from jsons1 where jtag->'location' like 'bei%' and jtag->'num'=34")
+ tdSql.checkRows(1)
+
+ tdSql.query("select *,tbname from jsons1 where (jtag->'location' like 'shanghai%' or jtag->'num'=34) and jtag->'class'=55")
+ tdSql.checkRows(0)
+
+ tdSql.error("select * from jsons1 where jtag->'num' like '5%'")
+
+ # test where condition in
+ tdSql.query("select * from jsons1 where jtag->'location' in ('beijing')")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from jsons1 where jtag->'num' in (5,34)")
+ tdSql.checkRows(2)
+
+ tdSql.error("select * from jsons1 where jtag->'num' in ('5',34)")
+
+ tdSql.query("select * from jsons1 where jtag->'location' in ('beijing') and jtag->'class'=55")
+ tdSql.checkRows(1)
+
+ # test where condition match
+ tdSql.query("select * from jsons1 where jtag->'location' match 'jin$'")
+ tdSql.checkRows(0)
+
+ tdSql.query("select * from jsons1 where jtag->'location' match 'jin'")
+ tdSql.checkRows(3)
+
+ tdSql.query("select * from jsons1 where datastr match 'json' and jtag->'location' match 'jin'")
+ tdSql.checkRows(3)
+
+ tdSql.error("select * from jsons1 where jtag->'num' match '5'")
+
+ # test json string parse
+ tdSql.error("CREATE TABLE if not exists jsons1_5 using jsons1 tags('efwewf')")
+ tdSql.execute("CREATE TABLE if not exists jsons1_5 using jsons1 tags('\t')")
+ tdSql.execute("CREATE TABLE if not exists jsons1_6 using jsons1 tags('')")
+
+ tdSql.query("select jtag from jsons1_6")
+ tdSql.checkData(0, 0, None)
+
+ tdSql.execute("CREATE TABLE if not exists jsons1_7 using jsons1 tags('{}')")
+ tdSql.query("select jtag from jsons1_7")
+ tdSql.checkData(0, 0, None)
+
+ tdSql.execute("CREATE TABLE if not exists jsons1_8 using jsons1 tags('null')")
+ tdSql.query("select jtag from jsons1_8")
+ tdSql.checkData(0, 0, None)
+
+ tdSql.execute("CREATE TABLE if not exists jsons1_9 using jsons1 tags('{\"\":4, \"time\":null}')")
+ tdSql.query("select jtag from jsons1_9")
+ tdSql.checkData(0, 0, None)
+
+ tdSql.execute("CREATE TABLE if not exists jsons1_10 using jsons1 tags('{\"k1\":\"\",\"k1\":\"v1\",\"k2\":true,\"k3\":false,\"k4\":55}')")
+ tdSql.query("select jtag from jsons1_10")
+ tdSql.checkData(0, 0, "{\"k1\":\"\",\"k2\":true,\"k3\":false,\"k4\":55}")
+
+ tdSql.query("select jtag->'k2' from jsons1_10")
+ tdSql.checkData(0, 0, "true")
+
+ tdSql.query("select jtag from jsons1 where jtag->'k1'=''")
+ tdSql.checkRows(1)
+
+ tdSql.query("select jtag from jsons1 where jtag->'k2'=true")
+ tdSql.checkRows(1)
+
+ tdSql.query("select jtag from jsons1 where jtag is null")
+ tdSql.checkRows(5)
+
+ tdSql.query("select jtag from jsons1 where jtag is not null")
+ tdSql.checkRows(5)
+
+ tdSql.query("select * from jsons1 where jtag->'location' is not null")
+ tdSql.checkRows(3)
+
+ tdSql.query("select tbname,jtag from jsons1 where jtag->'location' is null")
+ tdSql.checkRows(7)
+
+ tdSql.query("select * from jsons1 where jtag->'num' is not null")
+ tdSql.checkRows(2)
+
+ tdSql.query("select * from jsons1 where jtag->'location'='null'")
+ tdSql.checkRows(0)
+
+ tdSql.error("select * from jsons1 where jtag->'num'='null'")
+
+ # test distinct
+ tdSql.query("select distinct jtag from jsons1")
+ tdSql.checkRows(6)
+
+ tdSql.query("select distinct jtag->'location' from jsons1")
+ tdSql.checkRows(2)
+
+ # test chinese
+ tdSql.execute("CREATE TABLE if not exists jsons1_11 using jsons1 tags('{\"k1\":\"中国\",\"k5\":\"是是是\"}')")
+
+ tdSql.query("select tbname,jtag from jsons1 where jtag->'k1' match '中'")
+ tdSql.checkRows(1)
+
+ tdSql.query("select tbname,jtag from jsons1 where jtag->'k1'='中国'")
+ tdSql.checkRows(1)
+
+ # test duplicate key with normal column
+ tdSql.execute("INSERT INTO jsons1_12 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"dataStr\":\"是是是\"}') values(now, 4, \"你就会\")")
+
+ tdSql.query("select *,tbname,jtag from jsons1 where jtag->'dataStr' match '是'")
+ tdSql.checkRows(1)
+
+ tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt'")
+ tdSql.checkRows(1)
+
+ # tdSql.query("select * from jsons1 where jtag->'num' is not null or jtag?'class' and jtag?'databool'")
+ # tdSql.checkRows(0)
+
+ # tdSql.query("select * from jsons1 where jtag->'num' is not null or jtag?'class' and jtag?'databool' and jtag->'k1' match '中' or jtag->'location' in ('beijing') and jtag->'location' like 'bei%'")
+
+ # tdSql.query("select * from jsons1 where datastr like '你就会' and ( jtag->'num' is not null or jtag?'class' and jtag?'databool' )")
+
+
+ tdSql.error("select * from jsons1 where datastr like '你就会' or jtag->'num' is not null or jtag?'class' and jtag?'databool' and jtag->'k1' match '中' or jtag->'location' in ('beijing') and jtag->'location' like 'bei%' ")
+
+ # tdSql.query("select * from jsons1 where datastr like '你就会' and (jtag->'num' is not null or jtag?'class' and jtag?'databool' and jtag->'k1' match '中' or jtag->'location' in ('beijing') and jtag->'location' like 'bei%' )")
+ # tdSql.checkRows(0)
+
+ tdSql.error("select *,tbname,jtag from jsons1 where dataBool=true")
+
+ # test error
+ tdSql.error("CREATE TABLE if not exists jsons1_13 using jsons1 tags(3333)")
+ tdSql.execute("CREATE TABLE if not exists jsons1_13 using jsons1 tags('{\"1loc\":\"fff\",\";id\":5}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_13 using jsons1 tags('{\"。loc\":\"fff\",\"fsd\":5}')")
+ tdSql.error("CREATE TABLE if not exists jsons1_13 using jsons1 tags('{\"试试\":\"fff\",\";id\":5}')")
+ tdSql.error("insert into jsons1_13 using jsons1 tags(3)")
+
+ # test query normal column
+ tdSql.execute("create stable if not exists jsons3(ts timestamp, dataInt3 int(100), dataBool3 bool, dataStr3 nchar(50)) tags(jtag3 json)")
+ tdSql.execute("create table jsons3_2 using jsons3 tags('{\"t\":true,\"t123\":123,\"\":\"true\"}')")
+
+ tdSql.execute("create table jsons3_3 using jsons3 tags('{\"t\":true,\"t123\":456,\"k1\":true}')")
+ tdSql.execute("insert into jsons3_3 values(now, 4, true, 'test')")
+
+ tdSql.execute("insert into jsons3_4 using jsons3 tags('{\"t\":true,\"t123\":789,\"k1\":false,\"s\":null}') values(now, 5, true, 'test')")
+ tdSql.query("select * from jsons3 where jtag3->'k1'=true")
+ tdSql.checkRows(1)
+ tdSql.error("select jtag3->k1 from jsons3 ")
+ tdSql.error("select jtag3 from jsons3 where jtag3->'k1'")
+ tdSql.error("select jtag3 from jsons3 where jtag3?'k1'=true")
+ tdSql.error("select jtag3?'k1' from jsons3;")
+ tdSql.error("select jtag3?'k1'=true from jsons3;")
+ tdSql.error("select jtag3->'k1'=true from jsons3;")
+ tdSql.error("insert into jsons3_5 using jsons3 tags('{\"t\":true,\"t123\":789,\"k1\":1,\"s\":null}') values(now, 5, true, 'test')")
+ tdSql.execute("insert into jsons3_5 using jsons3 tags('{\"t\":true,\"t123\":012,\"k1\":null,\"s\":null}') values(now, 5, true, 'test')")
+ tdSql.execute("insert into jsons3_6 using jsons3 tags('{\"t\":true,\"t123\":789,\"k1\":false,\"s\":null}') values(now, 5, true, 'test')")
+ # tdSql.execute("select distinct jtag3 from jsons3 where jtag3->'t123'=12 or jtag3?'k1'")
+ # tdSql.checkRows(3)
+
+
+ tdSql.execute("INSERT INTO jsons1_14 using jsons1 tags('{\"tbname\":\"tt\",\"location\":\"tianjing\",\"dataStr\":\"是是是\"}') values(now,5, \"你就会\")")
+
+ # tdSql.execute("select ts,dataint3,jtag->tbname from jsons1 where dataint>=1 and jtag->'location' in ('tianjing','123') and jtag?'tbname'")
+ # tdSql.checkRows(1)
+ # tdSql.checkData(0, 2, 'tt')
+
+ # query normal column and tag column
+ tdSql.query("select jtag3->'',dataint3 from jsons3")
+ tdSql.checkRows(4)
+
+ # query child table
+
+ tdSql.error("select * from jsons3_2 where jtag3->'k1'=true;")
+
+ # tdSql.checkData(0, 0, None)
+ # tdSql.checkRows(3)
+
+
+
+ # # test drop tables and databases
+ # tdSql.execute("drop table jsons1_1")
+ # tdSql.execute("drop stable jsons1")
+ # tdSql.execute("drop stable jsons3")
+ # tdSql.execute("drop stable jsons2")
+ # tdSql.execute("drop database db_json_tag_test")
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json
index 5e53bd7e7d10edea9bdbc56ef9ab737dbb34684e..c2e4920097cd1b3581c9893c9677c3cf1f14b7ed 100644
--- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json
+++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ms",
- "keep": 365,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
@@ -36,7 +36,7 @@
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 60,
- "childtable_prefix": "stb00_",
+ "childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 20,
"data_source": "rand",
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json b/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json
new file mode 100644
index 0000000000000000000000000000000000000000..b14c3a8ec6d329e187c84b87412570e220eddb73
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/insert-sample-ts.json
@@ -0,0 +1,89 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "dbtest123",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 36500,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "sample",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 80,
+ "disorder_range": 10,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/sample_ts.csv",
+ "use_sample_ts": "yes",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count":2,
+ "childtable_prefix": "stb1_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "./tools/taosdemoAllTest/tags.csv",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/sample_ts.csv b/tests/pytest/tools/taosdemoAllTest/sample_ts.csv
new file mode 100644
index 0000000000000000000000000000000000000000..53948c3d2a0768a9c421b289710a69ff881b2d02
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/sample_ts.csv
@@ -0,0 +1,4 @@
+'2021-10-28 15:34:44.735',1,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true'
+'2021-10-29 15:34:44.735',2,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','true'
+'2021-10-30 15:34:44.735',3,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','false'
+1635665684735,4,-1,2147483647,0,2247483647.1,-12.2,'12ac,;\[uer]','23ac,;\[uer23423]123123','false'
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json
new file mode 100644
index 0000000000000000000000000000000000000000..b14c3a8ec6d329e187c84b87412570e220eddb73
--- /dev/null
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json
@@ -0,0 +1,89 @@
+{
+ "filetype": "insert",
+ "cfgdir": "/etc/taos",
+ "host": "127.0.0.1",
+ "port": 6030,
+ "user": "root",
+ "password": "taosdata",
+ "thread_count": 4,
+ "thread_count_create_tbl": 4,
+ "result_file":"./insert_res.txt",
+ "confirm_parameter_prompt": "no",
+ "insert_interval": 0,
+ "interlace_rows": 10,
+ "num_of_records_per_req": 1000,
+ "max_sql_len": 1024000,
+ "databases": [{
+ "dbinfo": {
+ "name": "dbtest123",
+ "drop": "yes",
+ "replica": 1,
+ "days": 10,
+ "cache": 50,
+ "blocks": 8,
+ "precision": "ms",
+ "keep": 36500,
+ "minRows": 100,
+ "maxRows": 4096,
+ "comp":2,
+ "walLevel":1,
+ "cachelast":0,
+ "quorum":1,
+ "fsync":3000,
+ "update": 0
+ },
+ "super_tables": [{
+ "name": "stb0",
+ "child_table_exists":"no",
+ "childtable_count": 10,
+ "childtable_prefix": "stb0_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "sample",
+ "insert_mode": "taosc",
+ "insert_rows": 100,
+ "childtable_limit": -1,
+ "childtable_offset": 0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval": 0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 80,
+ "disorder_range": 10,
+ "timestamp_step": 1,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./tools/taosdemoAllTest/sample_ts.csv",
+ "use_sample_ts": "yes",
+ "tags_file": "",
+ "columns": [{"type": "INT", "count":3}, {"type": "DOUBLE", "count":3}, {"type": "BINARY", "len": 16, "count":1}, {"type": "BINARY", "len": 32, "count":1}, {"type": "BOOL"}],
+ "tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
+ },
+ {
+ "name": "stb1",
+ "child_table_exists":"no",
+ "childtable_count":2,
+ "childtable_prefix": "stb1_",
+ "auto_create_table": "no",
+ "batch_create_tbl_num": 10,
+ "data_source": "rand",
+ "insert_mode": "taosc",
+ "insert_rows": 10,
+ "childtable_limit": -1,
+ "childtable_offset":0,
+ "multi_thread_write_one_tbl": "no",
+ "interlace_rows": 0,
+ "insert_interval":0,
+ "max_sql_len": 1024000,
+ "disorder_ratio": 0,
+ "disorder_range": 1000,
+ "timestamp_step": 10,
+ "start_timestamp": "2020-10-01 00:00:00.000",
+ "sample_format": "csv",
+ "sample_file": "./sample.csv",
+ "tags_file": "./tools/taosdemoAllTest/tags.csv",
+ "columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
+ "tags": [{"type": "TINYINT", "count":3}, {"type": "BINARY", "len": 16, "count":2}]
+ }]
+ }]
+}
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json
index 49ab6f3a4367b4cebd840bb24b43a5d190c0d464..fd458a88d1a434c22958d5086949009cdd6080bf 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ms",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json
index 9a35df917dcbb2600852e8172da0be3ffacb0d15..99233bdd738d068664241efda40d96c5a6fc7090 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ns",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json
index 631179dbaebfff29de6b38831b78fede989369d4..14bb9e9be07d9bd61dc089af0bb34acd523155d9 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "us",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
index a0b669d5f12e9ba8e2052f82c2d6d8ac349bd017..0e68e2e88078d3239ceba8d88200e7ea5b1cffe4 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
@@ -262,6 +262,25 @@ class TDTestCase:
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 10)
+ # insert: sample json
+ os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample-ts.json -y " % binPath)
+ tdSql.execute("use dbtest123")
+ tdSql.query("select c2 from stb0")
+ tdSql.checkData(0, 0, 2147483647)
+ tdSql.query("select c0 from stb0_0 order by ts")
+ tdSql.checkData(3, 0, 4)
+ tdSql.query("select count(*) from stb0 order by ts")
+ tdSql.checkData(0, 0, 40)
+ tdSql.query("select * from stb0_1 order by ts")
+ tdSql.checkData(0, 0, '2021-10-28 15:34:44.735')
+ tdSql.checkData(3, 0, '2021-10-31 15:34:44.735')
+ tdSql.query("select * from stb1 where t1=-127")
+ tdSql.checkRows(20)
+ tdSql.query("select * from stb1 where t2=127")
+ tdSql.checkRows(10)
+ tdSql.query("select * from stb1 where t2=126")
+ tdSql.checkRows(10)
+
# insert: sample json
os.system("%staosdemo -f tools/taosdemoAllTest/insert-sample.json -y " % binPath)
tdSql.execute("use dbtest123")
@@ -274,6 +293,7 @@ class TDTestCase:
tdSql.query("select * from stb1 where t2=126")
tdSql.checkRows(10)
+
# insert: test interlace parament
os.system("%staosdemo -f tools/taosdemoAllTest/insert-interlace-row.json -y " % binPath)
tdSql.execute("use db")
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
index 0aade4318390b43d8781cdac3deff3f1d7623b10..f09880ab727d9a197fb602663da1dc4c6fff7bb7 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestInsertWithJsonStmt.py
@@ -215,6 +215,25 @@ class TDTestCase:
tdSql.checkData(0, 0, 5000000)
+ # insert: sample json
+ os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-sample-ts-stmt.json -y " % binPath)
+ tdSql.execute("use dbtest123")
+ tdSql.query("select c2 from stb0")
+ tdSql.checkData(0, 0, 2147483647)
+ tdSql.query("select c0 from stb0_0 order by ts")
+ tdSql.checkData(3, 0, 4)
+ tdSql.query("select count(*) from stb0 order by ts")
+ tdSql.checkData(0, 0, 40)
+ tdSql.query("select * from stb0_1 order by ts")
+ tdSql.checkData(0, 0, '2021-10-28 15:34:44.735')
+ tdSql.checkData(3, 0, '2021-10-31 15:34:44.735')
+ tdSql.query("select * from stb1 where t1=-127")
+ tdSql.checkRows(20)
+ tdSql.query("select * from stb1 where t2=127")
+ tdSql.checkRows(10)
+ tdSql.query("select * from stb1 where t2=126")
+ tdSql.checkRows(10)
+
# insert: timestamp and step
os.system("%staosdemo -f tools/taosdemoAllTest/stmt/insert-timestep-stmt.json -y " % binPath)
tdSql.execute("use db")
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json
index 246f1c35f29973fc20602284b37ae68de23f70c1..e6c4b3205a77e20714067733bfa6f6c4053f087c 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ns",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json
index 0726f3905de2b254b49be51a7973d34b5eb6757e..a19132b1da9c99b8fe3792a1c2d475fd4f18ef91 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ns",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json
index f36b1f9b4c1b83707b9482428d4303a5418ad2c3..3b4c43d5d05ee1a1b26ee4016b1c38aade592b56 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ns",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json
index 867619ed8c1497e76077f96d257dd09a489d9eb7..7fb90727ef6fa38da73639ebe11125924b9ed507 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json
@@ -22,7 +22,7 @@
"cache": 50,
"blocks": 8,
"precision": "ns",
- "keep": 36,
+ "keep": 36500,
"minRows": 100,
"maxRows": 4096,
"comp":2,
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
index 6021c9136ad235f3e9d07bb4f6654fdac54989e5..3a3152ecde3c4eca09d8b8583cf90bbfdc0cc31d 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
@@ -20,14 +20,16 @@ from util.dnodes import *
import time
from datetime import datetime
import ast
+import re
# from assertpy import assert_that
import subprocess
+
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
-
+
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -40,52 +42,54 @@ class TDTestCase:
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
- buildPath = root[:len(root)-len("/build/bin")]
+ buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
# 获取taosc接口查询的结果文件中的内容,返回每行数据,并断言数据的第一列内容。
- def assertfileDataTaosc(self,filename,expectResult):
+ def assertfileDataTaosc(self, filename, expectResult):
self.filename = filename
self.expectResult = expectResult
- with open("%s" % filename, 'r+') as f1:
+ with open("%s" % filename, 'r+') as f1:
for line in f1.readlines():
queryResult = line.strip().split()[0]
- self.assertCheck(filename,queryResult,expectResult)
+ self.assertCheck(filename, queryResult, expectResult)
# 获取restful接口查询的结果文件中的关键内容,目前的关键内容找到第一个key就跳出循,所以就只有一个数据。后续再修改多个结果文件。
- def getfileDataRestful(self,filename):
+ def getfileDataRestful(self, filename):
self.filename = filename
- with open("%s" % filename, 'r+') as f1:
+ with open("%s" % filename, 'r+') as f1:
for line in f1.readlines():
contents = line.strip()
if contents.find("data") != -1:
+ pattern = re.compile("{.*}")
+ contents = pattern.search(contents).group()
contentsDict = ast.literal_eval(contents) # 字符串转换为字典
queryResult = contentsDict['data'][0][0]
break
return queryResult
# 获取taosc接口查询次数
- def queryTimesTaosc(self,filename):
+ def queryTimesTaosc(self, filename):
self.filename = filename
- command = 'cat %s |wc -l'% filename
- times = int(subprocess.getstatusoutput(command)[1])
+ command = 'cat %s |wc -l' % filename
+ times = int(subprocess.getstatusoutput(command)[1])
return times
# 获取restful接口查询次数
- def queryTimesRestful(self,filename):
+ def queryTimesRestful(self, filename):
self.filename = filename
- command = 'cat %s |grep "200 OK" |wc -l'% filename
- times = int(subprocess.getstatusoutput(command)[1])
+ command = 'cat %s |grep "200 OK" |wc -l' % filename
+ times = int(subprocess.getstatusoutput(command)[1])
return times
# 定义断言结果是否正确。不正确返回错误结果,正确即通过。
- def assertCheck(self,filename,queryResult,expectResult):
+ def assertCheck(self, filename, queryResult, expectResult):
self.filename = filename
self.queryResult = queryResult
self.expectResult = expectResult
args0 = (filename, queryResult, expectResult)
- assert queryResult == expectResult , "Queryfile:%s ,result is %s != expect: %s" % args0
+ assert queryResult == expectResult, "Queryfile:%s ,result is %s != expect: %s" % args0
def run(self):
buildPath = self.getBuildPath()
@@ -93,109 +97,144 @@ class TDTestCase:
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
- binPath = buildPath+ "/build/bin/"
-
+ binPath = buildPath + "/build/bin/"
+
# delete useless files
- os.system("rm -rf ./query_res*")
+ os.system("rm -rf ./query_res*")
os.system("rm -rf ./all_query*")
-
- # taosc query: query specified table and query super table
- os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/queryTaosc.json" % binPath)
+
+ # taosc query: query specified table and query super table
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" %
+ binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryTaosc.json" %
+ binPath)
os.system("cat query_res0.txt* > all_query_res0_taosc.txt")
os.system("cat query_res1.txt* > all_query_res1_taosc.txt")
os.system("cat query_res2.txt* > all_query_res2_taosc.txt")
-
- # correct Times testcases
+
+ # correct Times testcases
queryTimes0Taosc = self.queryTimesTaosc("all_query_res0_taosc.txt")
- self.assertCheck("all_query_res0_taosc.txt",queryTimes0Taosc,6)
+ self.assertCheck("all_query_res0_taosc.txt", queryTimes0Taosc, 6)
queryTimes1Taosc = self.queryTimesTaosc("all_query_res1_taosc.txt")
- self.assertCheck("all_query_res1_taosc.txt",queryTimes1Taosc,6)
+ self.assertCheck("all_query_res1_taosc.txt", queryTimes1Taosc, 6)
queryTimes2Taosc = self.queryTimesTaosc("all_query_res2_taosc.txt")
- self.assertCheck("all_query_res2_taosc.txt",queryTimes2Taosc,20)
-
+ self.assertCheck("all_query_res2_taosc.txt", queryTimes2Taosc, 20)
+
# correct data testcase
- self.assertfileDataTaosc("all_query_res0_taosc.txt","1604160000099")
- self.assertfileDataTaosc("all_query_res1_taosc.txt","100")
- self.assertfileDataTaosc("all_query_res2_taosc.txt","1604160000199")
-
+ self.assertfileDataTaosc("all_query_res0_taosc.txt", "1604160000099")
+ self.assertfileDataTaosc("all_query_res1_taosc.txt", "100")
+ self.assertfileDataTaosc("all_query_res2_taosc.txt", "1604160000199")
+
# delete useless files
- os.system("rm -rf ./query_res*")
+ os.system("rm -rf ./query_res*")
os.system("rm -rf ./all_query*")
-
# use restful api to query
- os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertrestdata.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/queryRestful.json" % binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryInsertrestdata.json" %
+ binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryRestful.json" %
+ binPath)
os.system("cat query_res0.txt* > all_query_res0_rest.txt")
os.system("cat query_res1.txt* > all_query_res1_rest.txt")
os.system("cat query_res2.txt* > all_query_res2_rest.txt")
-
- # correct Times testcases
+
+ # correct Times testcases
queryTimes0Restful = self.queryTimesRestful("all_query_res0_rest.txt")
- self.assertCheck("all_query_res0_rest.txt",queryTimes0Restful,6)
+ self.assertCheck("all_query_res0_rest.txt", queryTimes0Restful, 6)
queryTimes1Restful = self.queryTimesRestful("all_query_res1_rest.txt")
- self.assertCheck("all_query_res1_rest.txt",queryTimes1Restful,6)
-
+ self.assertCheck("all_query_res1_rest.txt", queryTimes1Restful, 6)
+
queryTimes2Restful = self.queryTimesRestful("all_query_res2_rest.txt")
- self.assertCheck("all_query_res2_rest.txt",queryTimes2Restful,4)
+ self.assertCheck("all_query_res2_rest.txt", queryTimes2Restful, 4)
# correct data testcase
data0 = self.getfileDataRestful("all_query_res0_rest.txt")
- self.assertCheck('all_query_res0_rest.txt',data0,"2020-11-01 00:00:00.009")
+ self.assertCheck(
+ 'all_query_res0_rest.txt',
+ data0,
+ "2020-11-01 00:00:00.009")
data1 = self.getfileDataRestful("all_query_res1_rest.txt")
- self.assertCheck('all_query_res1_rest.txt',data1,10)
+ self.assertCheck('all_query_res1_rest.txt', data1, 10)
data2 = self.getfileDataRestful("all_query_res2_rest.txt")
- self.assertCheck('all_query_res2_rest.txt',data2,"2020-11-01 00:00:00.004")
-
-
+ self.assertCheck(
+ 'all_query_res2_rest.txt',
+ data2,
+ "2020-11-01 00:00:00.004")
+
# query times less than or equal to 100
- os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" % binPath)
-
- #query result print QPS
- os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
- os.system("%staosdemo -f tools/taosdemoAllTest/queryQps.json" % binPath)
-
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" %
+ binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/querySpeciMutisql100.json" %
+ binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/querySuperMutisql100.json" %
+ binPath)
+
+ # query result print QPS
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" %
+ binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryQps.json" %
+ binPath)
+
# use illegal or out of range parameters query json file
- os.system("%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" % binPath)
- exceptcode = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimes0.json" % binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryInsertdata.json" %
+ binPath)
+ exceptcode = os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryTimes0.json" %
+ binPath)
assert exceptcode != 0
- exceptcode0 = os.system("%staosdemo -f tools/taosdemoAllTest/queryTimesless0.json" % binPath)
+ exceptcode0 = os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryTimesless0.json" %
+ binPath)
assert exceptcode0 != 0
- exceptcode1 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrentless0.json" % binPath)
+ exceptcode1 = os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryConcurrentless0.json" %
+ binPath)
assert exceptcode1 != 0
- exceptcode2 = os.system("%staosdemo -f tools/taosdemoAllTest/queryConcurrent0.json" % binPath)
+ exceptcode2 = os.system(
+ "%staosdemo -f tools/taosdemoAllTest/queryConcurrent0.json" %
+ binPath)
assert exceptcode2 != 0
- exceptcode3 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreadsless0.json" % binPath)
+ exceptcode3 = os.system(
+ "%staosdemo -f tools/taosdemoAllTest/querrThreadsless0.json" %
+ binPath)
assert exceptcode3 != 0
- exceptcode4 = os.system("%staosdemo -f tools/taosdemoAllTest/querrThreads0.json" % binPath)
+ exceptcode4 = os.system(
+ "%staosdemo -f tools/taosdemoAllTest/querrThreads0.json" %
+ binPath)
assert exceptcode4 != 0
# delete useless files
os.system("rm -rf ./insert_res.txt")
- os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
- os.system("rm -rf ./querySystemInfo*")
- os.system("rm -rf ./query_res*")
- os.system("rm -rf ./all_query*")
+ os.system("rm -rf tools/taosdemoAllTest/*.py.sql")
+ os.system("rm -rf ./querySystemInfo*")
+ os.system("rm -rf ./query_res*")
+# os.system("rm -rf ./all_query*")
os.system("rm -rf ./test_query_res0.txt")
-
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
+
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
index c3fdff00ec15fc1ca0d55f86d430c5cbf86ad168..d8c68af0f9b43443744d7d799db6f5ee1e1dacaa 100644
--- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
+++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestSupportNanoInsert.py
@@ -36,7 +36,7 @@ class TDTestCase:
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
- buildPath = root[:len(root)-len("/build/bin")]
+ buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
@@ -46,14 +46,15 @@ class TDTestCase:
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
- binPath = buildPath+ "/build/bin/"
+ binPath = buildPath + "/build/bin/"
-
# insert: create one or mutiple tables per sql and insert multiple rows per sql
# insert data from a special timestamp
# check stable stb0
- os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " % binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabase.json -y " %
+ binPath)
tdSql.execute("use nsdb")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
@@ -64,9 +65,9 @@ class TDTestCase:
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
- tdSql.checkDataType(9, 1,"TIMESTAMP")
+ tdSql.checkDataType(9, 1, "TIMESTAMP")
tdSql.query("select last(ts) from stb0")
- tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
+ tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000")
# check stable stb1 which is insert with disord
@@ -78,16 +79,18 @@ class TDTestCase:
tdSql.checkData(0, 0, 10000)
# check c8 is an nano timestamp
tdSql.query("describe stb1")
- tdSql.checkDataType(9, 1,"TIMESTAMP")
+ tdSql.checkDataType(9, 1, "TIMESTAMP")
# check insert timestamp_step is nano_second
tdSql.query("select last(ts) from stb1")
- tdSql.checkData(0, 0,"2021-07-01 00:00:00.990000000")
-
+ tdSql.checkData(0, 0, "2021-07-01 00:00:00.990000000")
+
# insert data from now time
# check stable stb0
- os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " % binPath)
-
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json -y " %
+ binPath)
+
tdSql.execute("use nsdb2")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
@@ -99,11 +102,14 @@ class TDTestCase:
tdSql.checkData(0, 0, 10000)
# check c8 is an nano timestamp
tdSql.query("describe stb0")
- tdSql.checkDataType(9,1,"TIMESTAMP")
+ tdSql.checkDataType(9, 1, "TIMESTAMP")
+
+ # insert by csv files and timestamp is long int , strings in ts and
+ # cols
- # insert by csv files and timetamp is long int , strings in ts and cols
-
- os.system("%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " % binPath)
+ os.system(
+ "%staosdemo -f tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json -y " %
+ binPath)
tdSql.execute("use nsdbcsv")
tdSql.query("show stables")
tdSql.checkData(0, 4, 100)
@@ -111,29 +117,36 @@ class TDTestCase:
tdSql.checkData(0, 0, 10000)
tdSql.query("describe stb0")
tdSql.checkDataType(3, 1, "TIMESTAMP")
- tdSql.query("select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"")
+ tdSql.query(
+ "select count(*) from stb0 where ts > \"2021-07-01 00:00:00.490000000\"")
tdSql.checkData(0, 0, 5000)
tdSql.query("select count(*) from stb0 where ts < 1626918583000000000")
tdSql.checkData(0, 0, 10000)
-
+
os.system("rm -rf ./insert_res.txt")
os.system("rm -rf tools/taosdemoAllTest/taosdemoTestSupportNano*.py.sql")
- # taosdemo test insert with command and parameter , detals show taosdemo --help
- os.system("%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " % binPath)
+ # taosdemo test insert with command and parameter , details show
+ # taosdemo --help
+ os.system(
+ "%staosdemo -u root -ptaosdata -P 6030 -a 1 -m pre -n 10 -T 20 -t 60 -o res.txt -y " %
+ binPath)
tdSql.query("select count(*) from test.meters")
tdSql.checkData(0, 0, 600)
# check taosdemo -s
- sqls_ls = ['drop database if exists nsdbsql;','create database nsdbsql precision "ns" keep 36 days 6 update 1;',
- 'use nsdbsql;','CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
- 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
- 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',
- 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);']
+ sqls_ls = [
+ 'drop database if exists nsdbsql;',
+ 'create database nsdbsql precision "ns" keep 36500 days 6 update 1;',
+ 'use nsdbsql;',
+ 'CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int);',
+ 'CREATE TABLE d1001 USING meters TAGS ("Beijing.Chaoyang", 2);',
+ 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 219, 0.32);',
+ 'INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 85, 32, 0.76);']
- with open("./taosdemoTestNanoCreateDB.sql",mode ="a" ) as sql_files:
+ with open("./taosdemoTestNanoCreateDB.sql", mode="a") as sql_files:
for sql in sqls_ls:
- sql_files.write(sql+"\n")
+ sql_files.write(sql + "\n")
sql_files.close()
sleep(10)
@@ -141,11 +154,10 @@ class TDTestCase:
os.system("%staosdemo -s taosdemoTestNanoCreateDB.sql -y " % binPath)
tdSql.query("select count(*) from nsdbsql.meters")
tdSql.checkData(0, 0, 2)
-
+
os.system("rm -rf ./res.txt")
os.system("rm -rf ./*.py.sql")
os.system("rm -rf ./taosdemoTestNanoCreateDB.sql")
-
def stop(self):
tdSql.close()
diff --git a/tests/pytest/tools/taosdemoTestTblAlt.py b/tests/pytest/tools/taosdemoTestTblAlt.py
index 70df535f59cbb97469b7a73e4e230d9a8671bfc7..89c1b92e140cb1e19b549d3248693153e116c52e 100644
--- a/tests/pytest/tools/taosdemoTestTblAlt.py
+++ b/tests/pytest/tools/taosdemoTestTblAlt.py
@@ -54,27 +54,36 @@ class TDTestCase:
binPath = buildPath + "/build/bin/"
if(threadID == 0):
- os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT -m t" %
+ print("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" %
+ (binPath, self.numberOfTables, self.numberOfRecords))
+ os.system("%staosdemo -y -t %d -n %d -b INT,INT,INT,INT" %
(binPath, self.numberOfTables, self.numberOfRecords))
if(threadID == 1):
time.sleep(2)
print("use test")
- while True:
+ max_try = 100
+ count = 0
+ while (count < max_try):
try:
tdSql.execute("use test")
break
except Exception as e:
tdLog.info("use database test failed")
- time.sleep(1)
+ time.sleep(2)
+ count += 1
+ print("try %d times" % count)
continue
# check if all the tables have heen created
- while True:
+ count = 0
+ while (count < max_try):
try:
tdSql.query("show tables")
except Exception as e:
tdLog.info("show tables test failed")
- time.sleep(1)
+ time.sleep(2)
+ count += 1
+ print("try %d times" % count)
continue
rows = tdSql.queryRows
@@ -83,13 +92,17 @@ class TDTestCase:
break
time.sleep(1)
# check if there are any records in the last created table
- while True:
+ count = 0
+ while (count < max_try):
print("query started")
+ print("try %d times" % count)
try:
- tdSql.query("select * from test.t7")
+ tdSql.query("select * from test.d7")
except Exception as e:
tdLog.info("select * test failed")
time.sleep(2)
+ count += 1
+ print("try %d times" % count)
continue
rows = tdSql.queryRows
@@ -100,8 +113,8 @@ class TDTestCase:
print("alter table test.meters add column c10 int")
tdSql.execute("alter table test.meters add column c10 int")
- print("insert into test.t7 values (now, 1, 2, 3, 4, 0)")
- tdSql.execute("insert into test.t7 values (now, 1, 2, 3, 4, 0)")
+ print("insert into test.d7 values (now, 1, 2, 3, 4, 0)")
+ tdSql.execute("insert into test.d7 values (now, 1, 2, 3, 4, 0)")
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/update/update_options.py b/tests/pytest/update/update_options.py
index dd1b82fc596a3a977b028234900337474b971ec2..f70ac6cc1d4299e481af30e2e9d24c3b24631856 100644
--- a/tests/pytest/update/update_options.py
+++ b/tests/pytest/update/update_options.py
@@ -63,6 +63,7 @@ class TDTestCase:
tdLog.sleep(3)
# test case for https://jira.taosdata.com:18080/browse/TS-402
+ tdLog.info("test case for update option 1")
tdSql.execute("create database test update 1")
tdSql.execute("use test")
@@ -75,7 +76,39 @@ class TDTestCase:
tdSql.checkData(0, 2, None)
tdSql.checkData(0, 3, 9)
-
+ tdSql.execute("drop table if exists tb")
+ tdSql.execute("create table tb (ts timestamp, c1 int, c2 int, c3 int)")
+ tdSql.execute("insert into tb values(%d, 1, 2, 3)(%d, null, 4, 5)(%d, 6, null, 7)" % (self.ts, self.ts, self.ts))
+
+ tdSql.query("select * from tb")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 6)
+ tdSql.checkData(0, 2, None)
+ tdSql.checkData(0, 3, 7)
+
+ # https://jira.taosdata.com:18080/browse/TS-424
+ tdLog.info("test case for update option 2")
+ tdSql.execute("create database db2 update 2")
+ tdSql.execute("use db2")
+
+ tdSql.execute("create table tb (ts timestamp, c1 int, c2 int, c3 int)")
+ tdSql.execute("insert into tb values(%d, 1, 2, 3)(%d, null, null, 9)" % (self.ts, self.ts))
+
+ tdSql.query("select * from tb")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(0, 2, 2)
+ tdSql.checkData(0, 3, 9)
+
+ tdSql.execute("drop table if exists tb")
+ tdSql.execute("create table tb (ts timestamp, c1 int, c2 int, c3 int)")
+ tdSql.execute("insert into tb values(%d, 1, 2, 3)(%d, null, 4, 5)(%d, 6, null, 7)" % (self.ts, self.ts, self.ts))
+
+ tdSql.query("select * from tb")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 1, 6)
+ tdSql.checkData(0, 2, 4)
+ tdSql.checkData(0, 3, 7)
def stop(self):
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index 2d854643b8a2980bf38d4aacc3c20ab8843abdf8..55c964c2557eff3204cf31bfb63cd5e3f3dd5501 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -275,7 +275,7 @@ class TDDnode:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/taosd"
- blm3BinPath = buildPath + "/build/bin/blm3"
+ taosadapterBinPath = buildPath + "/build/bin/taosadapter"
if self.deployed == 0:
tdLog.exit("dnode:%d is not deployed" % (self.index))
@@ -291,10 +291,10 @@ class TDDnode:
print(cmd)
- blm3Cmd = "nohup %s > /dev/null 2>&1 & " % (
- blm3BinPath)
- if os.system(blm3Cmd) != 0:
- tdLog.exit(blm3Cmd)
+ taosadapterCmd = "nohup %s > /dev/null 2>&1 & " % (
+ taosadapterBinPath)
+ if os.system(taosadapterCmd) != 0:
+ tdLog.exit(taosadapterCmd)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -340,7 +340,7 @@ class TDDnode:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/taosd"
- blm3BinPath = buildPath + "/build/bin/blm3"
+ taosadapterBinPath = buildPath + "/build/bin/taosadapter"
if self.deployed == 0:
tdLog.exit("dnode:%d is not deployed" % (self.index))
@@ -356,9 +356,9 @@ class TDDnode:
print(cmd)
- blm3Cmd = "%s > /dev/null 2>&1 & " % (blm3BinPath)
- if os.system(blm3Cmd) != 0:
- tdLog.exit(blm3Cmd)
+ taosadapterCmd = "%s > /dev/null 2>&1 & " % (taosadapterBinPath)
+ if os.system(taosadapterCmd) != 0:
+ tdLog.exit(taosadapterCmd)
if os.system(cmd) != 0:
tdLog.exit(cmd)
@@ -366,18 +366,18 @@ class TDDnode:
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
def stop(self):
- blm3ToBeKilled = "blm3"
+ taosadapterToBeKilled = "taosadapter"
- blm3PsCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % blm3ToBeKilled
- blm3ProcessID = subprocess.check_output(
- blm3PsCmd, shell=True).decode("utf-8")
+ taosadapterPsCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % taosadapterToBeKilled
+ taosadapterProcessID = subprocess.check_output(
+ taosadapterPsCmd, shell=True).decode("utf-8")
- while(blm3ProcessID):
- blm3KillCmd = "kill -INT %s > /dev/null 2>&1" % blm3ProcessID
- os.system(blm3KillCmd)
+ while(taosadapterProcessID):
+ taosadapterKillCmd = "kill -INT %s > /dev/null 2>&1" % taosadapterProcessID
+ os.system(taosadapterKillCmd)
time.sleep(1)
- blm3ProcessID = subprocess.check_output(
- blm3PsCmd, shell=True).decode("utf-8")
+ taosadapterProcessID = subprocess.check_output(
+ taosadapterPsCmd, shell=True).decode("utf-8")
if self.valgrind == 0:
toBeKilled = "taosd"
diff --git a/tests/script/api/openTSDBTest.c b/tests/script/api/openTSDBTest.c
index 70048e17fcaf6d609274d561b8d206490c53dd96..8b70a324ea55c905c9a8bdaf67de9c258f9d57d7 100644
--- a/tests/script/api/openTSDBTest.c
+++ b/tests/script/api/openTSDBTest.c
@@ -22,9 +22,9 @@ void verify_telnet_insert(TAOS* taos) {
/* metric */
char* lines0[] = {
- "stb0_0 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
- "stb0_1 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
- "stb0_2 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
+ "stb0_0 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
+ "stb0_1 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
+ "stb0_2 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
};
result = taos_schemaless_insert(taos, lines0, 3, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -35,15 +35,11 @@ void verify_telnet_insert(TAOS* taos) {
/* timestamp */
char* lines1[] = {
- "stb1 1626006833s 1i8 host=\"host0\"",
- "stb1 1626006833639000000ns 2i8 host=\"host0\"",
- "stb1 1626006833640000us 3i8 host=\"host0\"",
- "stb1 1626006833641 4i8 host=\"host0\"",
- "stb1 1626006832 5i8 host=\"host0\"",
- "stb1 1626006833651ms 6i8 host=\"host0\"",
- "stb1 0 7i8 host=\"host0\"",
+ "stb1 1626006833641 1i8 host=\"host0\"",
+ "stb1 1626006832 2i8 host=\"host0\"",
+ "stb1 0 3i8 host=\"host0\"",
};
- result = taos_schemaless_insert(taos, lines1, 7, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
+ result = taos_schemaless_insert(taos, lines1, 3, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
if (code) {
printf("lines1 code: %d, %s.\n", code, tstrerror(code));
@@ -53,8 +49,8 @@ void verify_telnet_insert(TAOS* taos) {
/* metric value */
//tinyint
char* lines2_0[] = {
- "stb2_0 1626006833651ms -127i8 host=\"host0\"",
- "stb2_0 1626006833652ms 127i8 host=\"host0\""
+ "stb2_0 1626006833651 -127i8 host=\"host0\"",
+ "stb2_0 1626006833652 127i8 host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_0, 2, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -65,8 +61,8 @@ void verify_telnet_insert(TAOS* taos) {
//smallint
char* lines2_1[] = {
- "stb2_1 1626006833651ms -32767i16 host=\"host0\"",
- "stb2_1 1626006833652ms 32767i16 host=\"host0\""
+ "stb2_1 1626006833651 -32767i16 host=\"host0\"",
+ "stb2_1 1626006833652 32767i16 host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_1, 2, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -77,8 +73,8 @@ void verify_telnet_insert(TAOS* taos) {
//int
char* lines2_2[] = {
- "stb2_2 1626006833651ms -2147483647i32 host=\"host0\"",
- "stb2_2 1626006833652ms 2147483647i32 host=\"host0\""
+ "stb2_2 1626006833651 -2147483647i32 host=\"host0\"",
+ "stb2_2 1626006833652 2147483647i32 host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_2, 2, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -89,8 +85,8 @@ void verify_telnet_insert(TAOS* taos) {
//bigint
char* lines2_3[] = {
- "stb2_3 1626006833651ms -9223372036854775807i64 host=\"host0\"",
- "stb2_3 1626006833652ms 9223372036854775807i64 host=\"host0\""
+ "stb2_3 1626006833651 -9223372036854775807i64 host=\"host0\"",
+ "stb2_3 1626006833652 9223372036854775807i64 host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_3, 2, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -101,16 +97,16 @@ void verify_telnet_insert(TAOS* taos) {
//float
char* lines2_4[] = {
- "stb2_4 1626006833610ms 3f32 host=\"host0\"",
- "stb2_4 1626006833620ms -3f32 host=\"host0\"",
- "stb2_4 1626006833630ms 3.4f32 host=\"host0\"",
- "stb2_4 1626006833640ms -3.4f32 host=\"host0\"",
- "stb2_4 1626006833650ms 3.4E10f32 host=\"host0\"",
- "stb2_4 1626006833660ms -3.4e10f32 host=\"host0\"",
- "stb2_4 1626006833670ms 3.4E+2f32 host=\"host0\"",
- "stb2_4 1626006833680ms -3.4e-2f32 host=\"host0\"",
- "stb2_4 1626006833700ms 3.4E38f32 host=\"host0\"",
- "stb2_4 1626006833710ms -3.4E38f32 host=\"host0\""
+ "stb2_4 1626006833610 3f32 host=\"host0\"",
+ "stb2_4 1626006833620 -3f32 host=\"host0\"",
+ "stb2_4 1626006833630 3.4f32 host=\"host0\"",
+ "stb2_4 1626006833640 -3.4f32 host=\"host0\"",
+ "stb2_4 1626006833650 3.4E10f32 host=\"host0\"",
+ "stb2_4 1626006833660 -3.4e10f32 host=\"host0\"",
+ "stb2_4 1626006833670 3.4E+2f32 host=\"host0\"",
+ "stb2_4 1626006833680 -3.4e-2f32 host=\"host0\"",
+ "stb2_4 1626006833700 3.4E38f32 host=\"host0\"",
+ "stb2_4 1626006833710 -3.4E38f32 host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_4, 10, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -121,17 +117,17 @@ void verify_telnet_insert(TAOS* taos) {
//double
char* lines2_5[] = {
- "stb2_5 1626006833610ms 3f64 host=\"host0\"",
- "stb2_5 1626006833620ms -3f64 host=\"host0\"",
- "stb2_5 1626006833630ms 3.4f64 host=\"host0\"",
- "stb2_5 1626006833640ms -3.4f64 host=\"host0\"",
- "stb2_5 1626006833650ms 3.4E10f64 host=\"host0\"",
- "stb2_5 1626006833660ms -3.4e10f64 host=\"host0\"",
- "stb2_5 1626006833670ms 3.4E+2f64 host=\"host0\"",
- "stb2_5 1626006833680ms -3.4e-2f64 host=\"host0\"",
- "stb2_5 1626006833690ms 1.7E308f64 host=\"host0\"",
- "stb2_5 1626006833700ms -1.7E308f64 host=\"host0\"",
- "stb2_5 1626006833710ms 3.15 host=\"host0\""
+ "stb2_5 1626006833610 3f64 host=\"host0\"",
+ "stb2_5 1626006833620 -3f64 host=\"host0\"",
+ "stb2_5 1626006833630 3.4f64 host=\"host0\"",
+ "stb2_5 1626006833640 -3.4f64 host=\"host0\"",
+ "stb2_5 1626006833650 3.4E10f64 host=\"host0\"",
+ "stb2_5 1626006833660 -3.4e10f64 host=\"host0\"",
+ "stb2_5 1626006833670 3.4E+2f64 host=\"host0\"",
+ "stb2_5 1626006833680 -3.4e-2f64 host=\"host0\"",
+ "stb2_5 1626006833690 1.7E308f64 host=\"host0\"",
+ "stb2_5 1626006833700 -1.7E308f64 host=\"host0\"",
+ "stb2_5 1626006833710 3.15 host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_5, 11, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -142,16 +138,16 @@ void verify_telnet_insert(TAOS* taos) {
//bool
char* lines2_6[] = {
- "stb2_6 1626006833610ms t host=\"host0\"",
- "stb2_6 1626006833620ms T host=\"host0\"",
- "stb2_6 1626006833630ms true host=\"host0\"",
- "stb2_6 1626006833640ms True host=\"host0\"",
- "stb2_6 1626006833650ms TRUE host=\"host0\"",
- "stb2_6 1626006833660ms f host=\"host0\"",
- "stb2_6 1626006833670ms F host=\"host0\"",
- "stb2_6 1626006833680ms false host=\"host0\"",
- "stb2_6 1626006833690ms False host=\"host0\"",
- "stb2_6 1626006833700ms FALSE host=\"host0\""
+ "stb2_6 1626006833610 t host=\"host0\"",
+ "stb2_6 1626006833620 T host=\"host0\"",
+ "stb2_6 1626006833630 true host=\"host0\"",
+ "stb2_6 1626006833640 True host=\"host0\"",
+ "stb2_6 1626006833650 TRUE host=\"host0\"",
+ "stb2_6 1626006833660 f host=\"host0\"",
+ "stb2_6 1626006833670 F host=\"host0\"",
+ "stb2_6 1626006833680 false host=\"host0\"",
+ "stb2_6 1626006833690 False host=\"host0\"",
+ "stb2_6 1626006833700 FALSE host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_6, 10, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -162,9 +158,9 @@ void verify_telnet_insert(TAOS* taos) {
//binary
char* lines2_7[] = {
- "stb2_7 1626006833610ms \"binary_val.!@#$%^&*\" host=\"host0\"",
- "stb2_7 1626006833620ms \"binary_val.:;,./?|+-=\" host=\"host0\"",
- "stb2_7 1626006833630ms \"binary_val.()[]{}<>\" host=\"host0\""
+ "stb2_7 1626006833610 \"binary_val.!@#$%^&*\" host=\"host0\"",
+ "stb2_7 1626006833620 \"binary_val.:;,./?|+-=\" host=\"host0\"",
+ "stb2_7 1626006833630 \"binary_val.()[]{}<>\" host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_7, 3, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -175,8 +171,8 @@ void verify_telnet_insert(TAOS* taos) {
//nchar
char* lines2_8[] = {
- "stb2_8 1626006833610ms L\"nchar_val数值一\" host=\"host0\"",
- "stb2_8 1626006833620ms L\"nchar_val数值二\" host=\"host0\""
+ "stb2_8 1626006833610 L\"nchar_val数值一\" host=\"host0\"",
+ "stb2_8 1626006833620 L\"nchar_val数值二\" host=\"host0\""
};
result = taos_schemaless_insert(taos, lines2_8, 2, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -188,8 +184,8 @@ void verify_telnet_insert(TAOS* taos) {
/* tags */
//tag value types
char* lines3_0[] = {
- "stb3_0 1626006833610ms 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"",
- "stb3_0 1626006833610ms 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\""
+ "stb3_0 1626006833610 1 t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=3.4E38f32 t6=1.7E308f64 t7=true t8=\"binary_val_1\" t9=L\"标签值1\"",
+ "stb3_0 1626006833610 2 t1=-127i8 t2=-32767i16 t3=-2147483647i32 t4=-9223372036854775807i64 t5=-3.4E38f32 t6=-1.7E308f64 t7=false t8=\"binary_val_2\" t9=L\"标签值2\""
};
result = taos_schemaless_insert(taos, lines3_0, 2, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
@@ -200,9 +196,9 @@ void verify_telnet_insert(TAOS* taos) {
//tag ID as child table name
char* lines3_1[] = {
- "stb3_1 1626006833610ms 1 id=child_table1 host=host1",
- "stb3_1 1626006833610ms 2 host=host2 iD=child_table2",
- "stb3_1 1626006833610ms 3 ID=child_table3 host=host3"
+ "stb3_1 1626006833610 1 id=child_table1 host=host1",
+ "stb3_1 1626006833610 2 host=host2 iD=child_table2",
+ "stb3_1 1626006833610 3 ID=child_table3 host=host3"
};
result = taos_schemaless_insert(taos, lines3_1, 3, TSDB_SML_TELNET_PROTOCOL, TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
code = taos_errno(result);
diff --git a/tests/script/fullGeneralSuite.sim b/tests/script/fullGeneralSuite.sim
index ec72827c9697cbb30a5845ff5f2a2f809ada4164..ebc054777940430e9cdb78b55b496dda873f2143 100644
--- a/tests/script/fullGeneralSuite.sim
+++ b/tests/script/fullGeneralSuite.sim
@@ -228,4 +228,6 @@ run general/db/show_create_table.sim
run general/parser/like.sim
run general/parser/regex.sim
run general/parser/tbname_escape.sim
+run general/parser/columnName_escape.sim
+run general/parser/tagName_escape.sim
run general/parser/interp_blocks.sim
diff --git a/tests/script/general/compute/csum.sim b/tests/script/general/compute/csum.sim
index 4d6f748566fdfedc3b6ac2ccf5fa6a22c7a5340f..e7a2c2065029ab58e92c0d5643262a01875843be 100644
--- a/tests/script/general/compute/csum.sim
+++ b/tests/script/general/compute/csum.sim
@@ -100,6 +100,158 @@ if $data11 != -2 then
return -1
endi
+print ==========>TD10758
+sql create database groupby_tbname
+sql use groupby_tbname
+sql create stable st(ts timestamp, c1 int) tags(t int);
+sql create table ct1 using st tags(1)
+sql insert into ct1 values(now, 1)(now+1s, 2)(now+2s, 3)
+sql create table ct2 using st tags(2)
+sql insert into ct2 values(now, 21)(now+1s, 22)(now+2s, 23)
+sql create table ct3 using st tags(3)
+sql insert into ct3 values(now, 31)(now+1s, 32)(now+2s, 33)
+
+sql select csum(c1),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select csum(c1),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select diff(c1),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select diff(c1),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select mavg(c1,2),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select mavg(c1,2),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select derivative(c1,1s,0),ts,tbname,t from ct1
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+sql select derivative(c1,1s,0),ts,tbname,t from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != 1 then
+ return -1
+endi
+
+sql select mavg(c1,2),tbname from st group by tbname
+print $data10 , $data11 , $data12 , $data13
+if $data12 != ct1 then
+ return -1
+endi
+if $data13 != ct1 then
+ return -1
+endi
+
+sql select diff(c1),tbname from st group by tbname
+print $data10 , $data11 , $data12 , $data13
+if $data12 != ct1 then
+ return -1
+endi
+if $data13 != ct1 then
+ return -1
+endi
+
+sql select csum(c1),tbname from st group by tbname
+print $data10 , $data11 , $data12, $data13, $data14
+print $data10 , $data11 , $data12 , $data13
+if $data12 != ct1 then
+ return -1
+endi
+if $data13 != ct1 then
+ return -1
+endi
+
+sql select csum(c1),t,tbname from st group by tbname limit 2
+print $data10 , $data11 , $data12 , $data13 , $data14
+print $data30 , $data31 , $data32 , $data33 , $data34
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != ct1 then
+ return -1
+endi
+if $data33 != ct2 then
+ return -1
+endi
+if $data34 != ct2 then
+ return -1
+endi
+
+sql select mavg(c1,2),t,tbname from st group by tbname limit 2
+print $data10 , $data11 , $data12 , $data13 , $data14
+print $data30 , $data31 , $data32 , $data33 , $data34
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != ct1 then
+ return -1
+endi
+if $data33 != ct2 then
+ return -1
+endi
+if $data34 != ct2 then
+ return -1
+endi
+sql select diff(c1),t,tbname from st group by tbname limit 2
+print $data10 , $data11 , $data12 , $data13 , $data14
+print $data30 , $data31 , $data32 , $data33 , $data34
+if $data13 != ct1 then
+ return -1
+endi
+if $data14 != ct1 then
+ return -1
+endi
+if $data33 != ct2 then
+ return -1
+endi
+if $data34 != ct2 then
+ return -1
+endi
+sql drop database groupby_tbname
+
print =============== clear
sql drop database $db
sql show databases
diff --git a/tests/script/general/parser/alter.sim b/tests/script/general/parser/alter.sim
index d1a4702a69dce22e7de0de005d912e7813648e01..005a8586a4b2aa0419d5a2f9e390707a2236ff43 100644
--- a/tests/script/general/parser/alter.sim
+++ b/tests/script/general/parser/alter.sim
@@ -86,12 +86,13 @@ endi
if $data07 != 11,12,13 then
return -1
endi
-sql alter database $db keep 365000,365000,365000
+sql_error alter database $db keep 365000,365000,365000
+sql alter database $db keep 36500,36500,36500
sql show databases
if $rows != 1 then
return -1
endi
-if $data07 != 365000,365000,365000 then
+if $data07 != 36500,36500,36500 then
return -1
endi
diff --git a/tests/script/general/parser/alter__for_community_version.sim b/tests/script/general/parser/alter__for_community_version.sim
index f55fb812a74eead44e54808b000a48c3db92b66d..0378b527e96530cacc0027d3d7d3e28105615e2a 100644
--- a/tests/script/general/parser/alter__for_community_version.sim
+++ b/tests/script/general/parser/alter__for_community_version.sim
@@ -79,15 +79,16 @@ endi
if $data07 != 13 then
return -1
endi
-sql alter database $db keep 365000
+sql alter database $db keep 36500
sql show databases
if $rows != 1 then
return -1
endi
-if $data07 != 365000 then
+if $data07 != 36500 then
return -1
endi
+sql_error alter database $db keep 365000
##### alter table test, simeplest case
sql create table tb (ts timestamp, c1 int, c2 int, c3 int)
diff --git a/tests/script/general/parser/columnName_escape.sim b/tests/script/general/parser/columnName_escape.sim
new file mode 100644
index 0000000000000000000000000000000000000000..dd3278d0dc98fa5378b7aed122dc39f6717372d5
--- /dev/null
+++ b/tests/script/general/parser/columnName_escape.sim
@@ -0,0 +1,426 @@
+system sh/stop_dnodes.sh
+
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+sql create database colesc;
+
+sql use colesc;
+
+print ======================= test create table/stable
+
+
+sql create table tb0 (ts timestamp, `123` int, `123 456` int, `123.abc` int)
+sql create table tb1 (ts timestamp, `!%^&*()` int)
+sql create table tb2 (ts timestamp, `int` int, `bool` int, `double` int, `INTO` int, `COLUMN` int)
+
+sql create table stb0 (ts timestamp, `123` int, `123 456` int, `123.abc` int) tags (t1 int)
+sql create table stb1 (ts timestamp, `!%^&*()` int) tags (t2 int)
+sql create table stb2 (ts timestamp, `int` int, `bool` int, `double` int, `INTO` int, `COLUMN` int) tags (t3 int)
+
+sql create table ctb0 using stb0 tags (1)
+sql create table ctb1 using stb1 tags (1)
+sql create table ctb2 using stb2 tags (1)
+
+##check table
+sql describe tb0;
+if $rows != 4 then
+ return -1
+endi
+if $data10 != @123@ then
+ return -1
+endi
+if $data20 != @123 456@ then
+ return -1
+endi
+if $data30 != @123.abc@ then
+ return -1
+endi
+
+sql describe tb1;
+if $rows != 2 then
+ return -1
+endi
+if $data10 != @!%^&*()@ then
+ return -1
+endi
+
+sql describe tb2;
+if $rows != 6 then
+ return -1
+endi
+if $data10 != @int@ then
+ return -1
+endi
+if $data20 != @bool@ then
+ return -1
+endi
+if $data30 != @double@ then
+ return -1
+endi
+if $data40 != @INTO@ then
+ return -1
+endi
+if $data50 != @COLUMN@ then
+ return -1
+endi
+##check stable
+sql describe stb0;
+if $rows != 5 then
+ return -1
+endi
+if $data10 != @123@ then
+ return -1
+endi
+if $data20 != @123 456@ then
+ return -1
+endi
+if $data30 != @123.abc@ then
+ return -1
+endi
+
+sql describe stb1;
+if $rows != 3 then
+ return -1
+endi
+if $data10 != @!%^&*()@ then
+ return -1
+endi
+
+sql describe stb2;
+if $rows != 7 then
+ return -1
+endi
+if $data10 != @int@ then
+ return -1
+endi
+if $data20 != @bool@ then
+ return -1
+endi
+if $data30 != @double@ then
+ return -1
+endi
+if $data40 != @INTO@ then
+ return -1
+endi
+if $data50 != @COLUMN@ then
+ return -1
+endi
+
+
+print ======================= test Alter columns for table/stable
+
+##Add column
+sql_error alter table tb0 add column `123` int
+sql_error alter table tb0 add column `123 456` int
+sql_error alter table tb0 add column `123.abc` int
+
+sql_error alter table ctb0 add column `1234`
+
+sql alter table tb0 add column `!%^&*()` int
+sql alter table tb0 add column `int` int
+sql alter table tb0 add column `bool` int
+sql alter table tb0 add column `double` int
+sql alter table tb0 add column `INTO` nchar(10)
+sql alter table tb0 add column `COLUMN` binary(10)
+
+sql alter table stb0 add column `!%^&*()` int
+sql alter table stb0 add column `int` int
+sql alter table stb0 add column `bool` int
+sql alter table stb0 add column `double` int
+sql alter table stb0 add column `INTO` nchar(10)
+sql alter table stb0 add column `COLUMN` binary(10)
+
+
+##check table
+sql describe tb0;
+if $rows != 10 then
+ return -1
+endi
+if $data40 != @!%^&*()@ then
+ return -1
+endi
+if $data50 != @int@ then
+ return -1
+endi
+if $data60 != @bool@ then
+ return -1
+endi
+if $data70 != @double@ then
+ return -1
+endi
+if $data80 != @INTO@ then
+ return -1
+endi
+if $data90 != @COLUMN@ then
+ return -1
+endi
+
+#check stable
+sql describe stb0;
+if $rows != 11 then
+ return -1
+endi
+if $data40 != @!%^&*()@ then
+ return -1
+endi
+if $data50 != @int@ then
+ return -1
+endi
+if $data60 != @bool@ then
+ return -1
+endi
+if $data70 != @double@ then
+ return -1
+endi
+if $data80 != @INTO@ then
+ return -1
+endi
+if $data90 != @COLUMN@ then
+ return -1
+endi
+
+##Drop column
+
+sql_error alter table ctb0 drop column `123`
+sql_error alter table ctb0 drop column `123 456`
+sql_error alter table ctb0 drop column `123.abc`
+
+sql alter table tb0 drop column `!%^&*()`
+sql alter table tb0 drop column `int`
+sql alter table tb0 drop column `bool`
+sql alter table tb0 drop column `double`
+sql alter table tb0 drop column `INTO`
+sql alter table tb0 drop column `COLUMN`
+
+sql alter table stb0 drop column `!%^&*()`
+sql alter table stb0 drop column `int`
+sql alter table stb0 drop column `bool`
+sql alter table stb0 drop column `double`
+sql alter table stb0 drop column `INTO`
+sql alter table stb0 drop column `COLUMN`
+
+##check table
+sql describe tb0;
+if $rows != 4 then
+ return -1
+endi
+if $data10 != @123@ then
+ return -1
+endi
+if $data20 != @123 456@ then
+ return -1
+endi
+if $data30 != @123.abc@ then
+ return -1
+endi
+
+##check stable
+sql describe stb0;
+if $rows != 5 then
+ return -1
+endi
+if $data10 != @123@ then
+ return -1
+endi
+if $data20 != @123 456@ then
+ return -1
+endi
+if $data30 != @123.abc@ then
+ return -1
+endi
+
+##Modify column for binary/nchar length
+
+sql alter table tb0 add column `INTO` nchar(10)
+sql alter table tb0 add column `COLUMN` binary(10)
+
+sql alter table stb0 add column `INTO` nchar(10)
+sql alter table stb0 add column `COLUMN` binary(10)
+
+sql alter table tb0 modify column `INTO` nchar(15)
+sql alter table tb0 modify column `COLUMN` binary(15)
+
+sql alter table stb0 modify column `INTO` nchar(15)
+sql alter table stb0 modify column `COLUMN` binary(15)
+
+sql describe tb0;
+if $rows != 6 then
+ return -1
+endi
+if $data42 != @15@ then
+ return -1
+endi
+if $data52 != @15@ then
+ return -1
+endi
+
+sql describe stb0;
+if $rows != 7 then
+ return -1
+endi
+if $data42 != @15@ then
+ return -1
+endi
+if $data52 != @15@ then
+ return -1
+endi
+
+print ======================= test insert columns for table/stable
+
+sql insert into tb0 (ts, `123`, `123 456`, `123.abc`) values (now, 1, 1, 1)
+sql insert into tb1 (ts, `!%^&*()`) values (now, 1)
+sql insert into tb2 (ts, `int`, `bool`, `double`, `INTO`, `COLUMN`) values (now, 1, 1, 1, 1, 1)
+
+sql insert into ctb0 (ts, `123`, `123 456`, `123.abc`) values (now, 1, 1, 1)
+sql insert into ctb1 (ts, `!%^&*()`) values (now, 1)
+sql insert into ctb2 (ts, `int`, `bool`, `double`, `INTO`, `COLUMN`) values (now, 1, 1, 1, 1, 1)
+
+sql select * from tb0;
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from tb1;
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from tb2;
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from ctb0;
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from ctb1;
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from ctb2;
+if $rows != 1 then
+ return -1
+endi
+
+print ======================= test select columns for table/stable
+
+sql select `123`,`123 456`,`123.abc` from tb0;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data02 != 1 then
+ return -1
+endi
+
+sql select `!%^&*()` from tb1;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+sql select `int`,`bool`,`double`,`INTO`,`COLUMN` from tb2;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data02 != 1 then
+ return -1
+endi
+
+if $data03 != 1 then
+ return -1
+endi
+
+if $data04 != 1 then
+ return -1
+endi
+
+
+sql select `123`,`123 456`,`123.abc` from stb0;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data02 != 1 then
+ return -1
+endi
+
+sql select `!%^&*()` from stb1;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+sql select `int`,`bool`,`double`,`INTO`,`COLUMN` from stb2;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != 1 then
+ return -1
+endi
+
+if $data01 != 1 then
+ return -1
+endi
+
+if $data02 != 1 then
+ return -1
+endi
+
+if $data03 != 1 then
+ return -1
+endi
+
+if $data04 != 1 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/join.sim b/tests/script/general/parser/join.sim
index e2132589bd3a54d42e683094f184b3a4a4932f71..36deea0371486394125355cc92a9785764610569 100644
--- a/tests/script/general/parser/join.sim
+++ b/tests/script/general/parser/join.sim
@@ -445,7 +445,7 @@ if $rows != $val then
endi
print ================>TD-5600
-sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb1.ts>=100000 interval(1s) fill(linear);
+sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb1.ts>=100000 interval(1s);
#===============================================================
diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim
index f2c539dbf8b8bd68c6481e790198a28d860f0b92..22f18179a418b5779993c91a17d62848674e1774 100644
--- a/tests/script/general/parser/nestquery.sim
+++ b/tests/script/general/parser/nestquery.sim
@@ -549,4 +549,381 @@ if $data11 != 2.000000000 then
return -1
endi
+sql create database test2;
+sql use test2;
+sql create table meters (ts TIMESTAMP,a INT,b INT) TAGS (area INT);
+sql CREATE TABLE t0 USING meters TAGS (0);
+sql CREATE TABLE t1 USING meters TAGS (1);
+sql CREATE TABLE t2 USING meters TAGS (1);
+sql CREATE TABLE t3 USING meters TAGS (0);
+sql insert into t0 values ('2021-09-30 15:00:00.00',0,0);
+sql insert into t0 values ('2021-09-30 15:00:01.00',1,1);
+sql insert into t0 values ('2021-09-30 15:00:03.00',3,3);
+sql insert into t0 values ('2021-09-30 15:00:05.00',5,5);
+sql insert into t0 values ('2021-09-30 15:00:07.00',7,7);
+sql insert into t0 values ('2021-09-30 15:00:09.00',9,9);
+
+sql insert into t1 values ('2021-09-30 15:00:00.00',0,0);
+sql insert into t1 values ('2021-09-30 15:00:02.00',2,2);
+sql insert into t1 values ('2021-09-30 15:00:04.00',4,4);
+sql insert into t1 values ('2021-09-30 15:00:06.00',6,6);
+sql insert into t1 values ('2021-09-30 15:00:08.00',8,8);
+sql insert into t1 values ('2021-09-30 15:00:10.00',10,10);
+
+sql insert into t2 values ('2021-09-30 15:00:00.00',0,0);
+sql insert into t2 values ('2021-09-30 15:00:01.00',11,11);
+sql insert into t2 values ('2021-09-30 15:00:02.00',22,22);
+sql insert into t2 values ('2021-09-30 15:00:03.00',33,33);
+sql insert into t2 values ('2021-09-30 15:00:04.00',44,44);
+sql insert into t2 values ('2021-09-30 15:00:05.00',55,55);
+
+sql insert into t3 values ('2021-09-30 15:00:00.00',0,0);
+sql insert into t3 values ('2021-09-30 15:00:01.00',11,11);
+sql insert into t3 values ('2021-09-30 15:00:02.00',22,22);
+sql insert into t3 values ('2021-09-30 15:00:03.00',33,33);
+sql insert into t3 values ('2021-09-30 15:00:04.00',44,44);
+sql insert into t3 values ('2021-09-30 15:00:05.00',55,55);
+
+sql select count(*) from meters interval(1s) group by tbname;
+if $rows != 24 then
+ return -1
+endi
+
+sql select count(*) from (select count(*) from meters interval(1s) group by tbname) interval(1s);
+if $rows != 11 then
+ return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+ return -1
+endi
+if $data01 != 4 then
+ return -1
+endi
+if $data10 != @21-09-30 15:00:01.000@ then
+ return -1
+endi
+if $data11 != 3 then
+ return -1
+endi
+if $data20 != @21-09-30 15:00:02.000@ then
+ return -1
+endi
+if $data21 != 3 then
+ return -1
+endi
+if $data30 != @21-09-30 15:00:03.000@ then
+ return -1
+endi
+if $data31 != 3 then
+ return -1
+endi
+if $data40 != @21-09-30 15:00:04.000@ then
+ return -1
+endi
+if $data41 != 3 then
+ return -1
+endi
+if $data50 != @21-09-30 15:00:05.000@ then
+ return -1
+endi
+if $data51 != 3 then
+ return -1
+endi
+if $data60 != @21-09-30 15:00:06.000@ then
+ return -1
+endi
+if $data61 != 1 then
+ return -1
+endi
+if $data70 != @21-09-30 15:00:07.000@ then
+ return -1
+endi
+if $data71 != 1 then
+ return -1
+endi
+if $data80 != @21-09-30 15:00:08.000@ then
+ return -1
+endi
+if $data81 != 1 then
+ return -1
+endi
+if $data90 != @21-09-30 15:00:09.000@ then
+ return -1
+endi
+if $data91 != 1 then
+ return -1
+endi
+
+sql select count(*) from (select count(*) from meters interval(1s) group by area) interval(1s);
+if $rows != 11 then
+ return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+ return -1
+endi
+if $data01 != 2 then
+ return -1
+endi
+if $data10 != @21-09-30 15:00:01.000@ then
+ return -1
+endi
+if $data11 != 2 then
+ return -1
+endi
+if $data20 != @21-09-30 15:00:02.000@ then
+ return -1
+endi
+if $data21 != 2 then
+ return -1
+endi
+if $data30 != @21-09-30 15:00:03.000@ then
+ return -1
+endi
+if $data31 != 2 then
+ return -1
+endi
+if $data40 != @21-09-30 15:00:04.000@ then
+ return -1
+endi
+if $data41 != 2 then
+ return -1
+endi
+if $data50 != @21-09-30 15:00:05.000@ then
+ return -1
+endi
+if $data51 != 2 then
+ return -1
+endi
+if $data60 != @21-09-30 15:00:06.000@ then
+ return -1
+endi
+if $data61 != 1 then
+ return -1
+endi
+if $data70 != @21-09-30 15:00:07.000@ then
+ return -1
+endi
+if $data71 != 1 then
+ return -1
+endi
+if $data80 != @21-09-30 15:00:08.000@ then
+ return -1
+endi
+if $data81 != 1 then
+ return -1
+endi
+if $data90 != @21-09-30 15:00:09.000@ then
+ return -1
+endi
+if $data91 != 1 then
+ return -1
+endi
+
+
+sql select sum(sa) from (select sum(a) as sa from meters interval(1s) group by tbname) interval(1s);
+if $rows != 11 then
+ return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+ return -1
+endi
+if $data01 != 0 then
+ return -1
+endi
+if $data10 != @21-09-30 15:00:01.000@ then
+ return -1
+endi
+if $data11 != 23 then
+ return -1
+endi
+if $data20 != @21-09-30 15:00:02.000@ then
+ return -1
+endi
+if $data21 != 46 then
+ return -1
+endi
+if $data30 != @21-09-30 15:00:03.000@ then
+ return -1
+endi
+if $data31 != 69 then
+ return -1
+endi
+if $data40 != @21-09-30 15:00:04.000@ then
+ return -1
+endi
+if $data41 != 92 then
+ return -1
+endi
+if $data50 != @21-09-30 15:00:05.000@ then
+ return -1
+endi
+if $data51 != 115 then
+ return -1
+endi
+if $data60 != @21-09-30 15:00:06.000@ then
+ return -1
+endi
+if $data61 != 6 then
+ return -1
+endi
+if $data70 != @21-09-30 15:00:07.000@ then
+ return -1
+endi
+if $data71 != 7 then
+ return -1
+endi
+if $data80 != @21-09-30 15:00:08.000@ then
+ return -1
+endi
+if $data81 != 8 then
+ return -1
+endi
+if $data90 != @21-09-30 15:00:09.000@ then
+ return -1
+endi
+if $data91 != 9 then
+ return -1
+endi
+
+sql select sum(sa) from (select sum(a) as sa from meters interval(1s) group by area) interval(1s);
+if $rows != 11 then
+ return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+ return -1
+endi
+if $data01 != 0 then
+ return -1
+endi
+if $data10 != @21-09-30 15:00:01.000@ then
+ return -1
+endi
+if $data11 != 23 then
+ return -1
+endi
+if $data20 != @21-09-30 15:00:02.000@ then
+ return -1
+endi
+if $data21 != 46 then
+ return -1
+endi
+if $data30 != @21-09-30 15:00:03.000@ then
+ return -1
+endi
+if $data31 != 69 then
+ return -1
+endi
+if $data40 != @21-09-30 15:00:04.000@ then
+ return -1
+endi
+if $data41 != 92 then
+ return -1
+endi
+if $data50 != @21-09-30 15:00:05.000@ then
+ return -1
+endi
+if $data51 != 115 then
+ return -1
+endi
+if $data60 != @21-09-30 15:00:06.000@ then
+ return -1
+endi
+if $data61 != 6 then
+ return -1
+endi
+if $data70 != @21-09-30 15:00:07.000@ then
+ return -1
+endi
+if $data71 != 7 then
+ return -1
+endi
+if $data80 != @21-09-30 15:00:08.000@ then
+ return -1
+endi
+if $data81 != 8 then
+ return -1
+endi
+if $data90 != @21-09-30 15:00:09.000@ then
+ return -1
+endi
+if $data91 != 9 then
+ return -1
+endi
+
+
+
+sql select count(*) from (select count(*) from meters interval(1s)) interval(1s);
+if $rows != 11 then
+ return -1
+endi
+if $data00 != @21-09-30 15:00:00.000@ then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+if $data10 != @21-09-30 15:00:01.000@ then
+ return -1
+endi
+if $data11 != 1 then
+ return -1
+endi
+if $data20 != @21-09-30 15:00:02.000@ then
+ return -1
+endi
+if $data21 != 1 then
+ return -1
+endi
+if $data30 != @21-09-30 15:00:03.000@ then
+ return -1
+endi
+if $data31 != 1 then
+ return -1
+endi
+if $data40 != @21-09-30 15:00:04.000@ then
+ return -1
+endi
+if $data41 != 1 then
+ return -1
+endi
+if $data50 != @21-09-30 15:00:05.000@ then
+ return -1
+endi
+if $data51 != 1 then
+ return -1
+endi
+if $data60 != @21-09-30 15:00:06.000@ then
+ return -1
+endi
+if $data61 != 1 then
+ return -1
+endi
+if $data70 != @21-09-30 15:00:07.000@ then
+ return -1
+endi
+if $data71 != 1 then
+ return -1
+endi
+if $data80 != @21-09-30 15:00:08.000@ then
+ return -1
+endi
+if $data81 != 1 then
+ return -1
+endi
+if $data90 != @21-09-30 15:00:09.000@ then
+ return -1
+endi
+if $data91 != 1 then
+ return -1
+endi
+
+sql select count(*) from (select count(*) from meters interval(1s) group by tbname);
+if $rows != 1 then
+ return -1
+endi
+if $data00 != 24 then
+ return -1
+endi
+
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/tagName_escape.sim b/tests/script/general/parser/tagName_escape.sim
new file mode 100644
index 0000000000000000000000000000000000000000..1dc9121a45ea23201d63dedfb7a6c446ee7b0e87
--- /dev/null
+++ b/tests/script/general/parser/tagName_escape.sim
@@ -0,0 +1,207 @@
+system sh/stop_dnodes.sh
+
+
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c walLevel -v 1
+system sh/exec.sh -n dnode1 -s start
+
+sleep 100
+sql connect
+print ======================== dnode1 start
+
+sql create database tagesc;
+
+sql use tagesc;
+
+print ======================= test create table/stable
+
+sql create stable stb0 (ts timestamp, c0 int) tags (`123` int, `123 456` int, `123.abc` int)
+sql create stable stb1 (ts timestamp, c1 int) tags (`!%^&*()` int)
+sql create stable stb2 (ts timestamp, c2 int) tags (`int` int, `bool` int, `double` int, `INTO` int, `COLUMN` int)
+
+sql create table ctb0 using stb0 (`123`, `123 456`, `123.abc`) tags (1, 1, 1)
+sql create table ctb1 using stb1 (`!%^&*()`) tags (1)
+sql create table ctb2 using stb2 (`int`, `bool`, `double`, `INTO`, `COLUMN`) tags (1, 1, 1, 1, 1)
+
+##check table
+sql describe ctb0;
+if $rows != 5 then
+ return -1
+endi
+if $data20 != @123@ then
+ return -1
+endi
+if $data30 != @123 456@ then
+ return -1
+endi
+if $data40 != @123.abc@ then
+ return -1
+endi
+
+sql describe ctb1;
+if $rows != 3 then
+ return -1
+endi
+if $data20 != @!%^&*()@ then
+ return -1
+endi
+
+sql describe ctb2;
+if $rows != 7 then
+ return -1
+endi
+if $data20 != @int@ then
+ return -1
+endi
+if $data30 != @bool@ then
+ return -1
+endi
+if $data40 != @double@ then
+ return -1
+endi
+if $data50 != @INTO@ then
+ return -1
+endi
+if $data60 != @COLUMN@ then
+ return -1
+endi
+
+print ======================= test Alter tags for stable
+
+##ADD TAG
+sql_error alter stable stb0 add tag `123` int
+sql_error alter stable stb0 add tag `123 456` int
+sql_error alter stable stb0 add tag `123.abc` int
+
+sql alter stable stb0 add tag `!%^&*()` int
+sql alter stable stb0 add tag `int` int
+sql alter stable stb0 add tag `bool` int
+sql alter stable stb0 add tag `double` int
+sql alter stable stb0 add tag `INTO` int
+sql alter stable stb0 add tag `COLUMN` int
+
+
+sql describe stb0;
+if $rows != 11 then
+ return -1
+endi
+if $data50 != @!%^&*()@ then
+ return -1
+endi
+if $data60 != @int@ then
+ return -1
+endi
+if $data70 != @bool@ then
+ return -1
+endi
+if $data80 != @double@ then
+ return -1
+endi
+if $data90 != @INTO@ then
+ return -1
+endi
+
+
+##DROP TAG
+sql alter stable stb0 drop tag `!%^&*()`
+sql alter stable stb0 drop tag `int`
+sql alter stable stb0 drop tag `bool`
+sql alter stable stb0 drop tag `double`
+sql alter stable stb0 drop tag `INTO`
+sql alter stable stb0 drop tag `COLUMN`
+
+
+sql describe stb0;
+if $rows != 5 then
+ return -1
+endi
+if $data20 != @123@ then
+ return -1
+endi
+if $data30 != @123 456@ then
+ return -1
+endi
+if $data40 != @123.abc@ then
+ return -1
+endi
+
+
+##CHANGE TAG
+
+sql alter stable stb0 change tag `123` `321`
+sql alter stable stb0 change tag `123 456` `456 123`
+#sql alter stable stb0 change tag `123.abc` `abc.123`
+#change tag has a bug when using dot in tag name
+
+sql describe stb0;
+if $rows != 5 then
+ return -1
+endi
+if $data20 != @321@ then
+ return -1
+endi
+if $data30 != @456 123@ then
+ return -1
+endi
+
+##MODIFY TAG
+
+sql alter stable stb0 add tag `key` binary(10)
+sql alter stable stb0 add tag `value` nchar(10)
+sql alter stable stb0 modify tag `key` binary(15)
+sql alter stable stb0 modify tag `value` nchar(15)
+
+sql describe stb0;
+if $rows != 7 then
+ return -1
+endi
+if $data52 != 15 then
+ return -1
+endi
+if $data62 != 15 then
+ return -1
+endi
+
+
+##SET TAG
+
+sql insert into ctb0 values (now, 1)
+sql insert into ctb1 values (now, 1)
+sql insert into ctb2 values (now, 1)
+
+sql alter table ctb0 set tag `321`=2
+sql alter table ctb0 set tag `456 123`=2
+#sql alter table ctb0 set tag `abc.123`=2
+#set tag has a bug when using dot in tag name (same root cause as change tag)
+
+
+print ======================= test insert specific tags automatically create table
+
+sql alter table ctb0 set tag `321`=2
+sql alter table ctb0 set tag `321`=2
+sql insert into ctb0_0 using stb0 (`321`, `456 123`, `123.abc`) tags (1, 1, 1) values (now + 10s, 5)
+sql insert into ctb1_0 using stb1 (`!%^&*()`) tags (1) values (now + 10s, 5)
+sql insert into ctb2_0 using stb2 (`int`, `bool`, `double`, `INTO`, `COLUMN`) tags (1, 1, 1, 1, 1) values (now + 10s, 5)
+sql insert into ctb2_1 using stb2 (`int`, `bool`, `INTO`, `COLUMN`) tags (1, 1, 1, 1) values (now + 10s, 5)
+
+sql select * from stb0;
+if $rows != 2 then
+ return -1
+endi
+
+sql select * from stb1;
+if $rows != 2 then
+ return -1
+endi
+
+sql select * from stb2;
+if $rows != 3 then
+ return -1
+endi
+
+if $data24 != NULL then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index ae47241ac4b102bc4d788a5bce74d716a4d20b5b..850f3a19467a8748bba56f80033d4fc0b0bc77a3 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -404,7 +404,7 @@ cd ../../../debug; make
./test.sh -f unique/mnode/mgmt34.sim
./test.sh -f unique/mnode/mgmtr2.sim
-./test.sh -f unique/arbitrator/insert_duplicationTs.sim
+#./test.sh -f unique/arbitrator/insert_duplicationTs.sim
./test.sh -f general/parser/join_manyblocks.sim
./test.sh -f general/parser/stableOp.sim
./test.sh -f general/parser/timestamp.sim
diff --git a/tests/test/c/createNormalTable.c b/tests/test/c/createNormalTable.c
index 60253e2add1ebaa1e6c2c00b073cf13672789346..0dad7eb9b68a5584f4f6347c74b8266299c03da4 100644
--- a/tests/test/c/createNormalTable.c
+++ b/tests/test/c/createNormalTable.c
@@ -233,5 +233,5 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC);
pPrint("%s replica:%d %s", GREEN, replica, NC);
- pPrint("%s start create table performace test %s", GREEN, NC);
+ pPrint("%s start create table performance test %s", GREEN, NC);
}
diff --git a/tests/test/c/createTablePerformance.c b/tests/test/c/createTablePerformance.c
index b94c687f2cba7310949b0a3b12b6f4fc007e5a9a..0e81279819ec8c1c1c0e5601a24193823997c914 100644
--- a/tests/test/c/createTablePerformance.c
+++ b/tests/test/c/createTablePerformance.c
@@ -221,5 +221,5 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%s numOfColumns:%d %s", GREEN, numOfColumns, NC);
pPrint("%s replica:%d %s", GREEN, replica, NC);
- pPrint("%s start create table performace test %s", GREEN, NC);
+ pPrint("%s start create table performance test %s", GREEN, NC);
}
diff --git a/tests/tsim/inc/sim.h b/tests/tsim/inc/sim.h
index 2e19dde3d9c52c20705d131f471a2e0e389589e4..d9a7e13138b125f92d0611614f74abf268eaee70 100644
--- a/tests/tsim/inc/sim.h
+++ b/tests/tsim/inc/sim.h
@@ -135,7 +135,7 @@ typedef struct _script_t {
int32_t numOfLines; // number of lines in the script
int32_t bgScriptLen;
char fileName[MAX_FILE_NAME_LEN]; // script file name
- char error[MAX_ERROR_LEN];
+ char error[TSDB_MAX_BINARY_LEN + 100];
char * optionBuffer;
SCmdLine *lines; // command list
SVariable variables[MAX_VAR_LEN];
@@ -178,4 +178,4 @@ bool simExecuteLineInsertCmd(SScript *script, char *option);
bool simExecuteLineInsertErrorCmd(SScript *script, char *option);
void simVisuallizeOption(SScript *script, char *src, char *dst);
-#endif
\ No newline at end of file
+#endif